Diffstat (limited to 'include/linux')
63 files changed, 2708 insertions, 563 deletions
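Several hunks below (atmdev.h, igmp.h, inetdevice.h) convert reference counters from atomic_t to refcount_t. A minimal sketch of the pattern being adopted, using only the <linux/refcount.h> API; the struct foo object here is hypothetical:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);	/* caller holds the initial reference */
	return f;
}

static void foo_hold(struct foo *f)
{
	/* Unlike atomic_inc(), refcount_inc() saturates and warns on
	 * increment-from-zero, so a use-after-free cannot silently
	 * resurrect a freed object.
	 */
	refcount_inc(&f->refcnt);
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		kfree(f);
}

This is the same hold/put shape as atm_dev_hold()/atm_dev_put() in the first hunk; the conversion tightens overflow and underflow behaviour rather than changing any locking rules.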
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index c1da539f5e28..0ec9bdb1cc9f 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -11,6 +11,7 @@ #include <linux/uio.h> #include <net/sock.h> #include <linux/atomic.h> +#include <linux/refcount.h> #include <uapi/linux/atmdev.h> #ifdef CONFIG_PROC_FS @@ -158,7 +159,7 @@ struct atm_dev { struct k_atm_dev_stats stats; /* statistics */ char signal; /* signal status (ATM_PHY_SIG_*) */ int link_rate; /* link rate (default: OC3) */ - atomic_t refcnt; /* reference count */ + refcount_t refcnt; /* reference count */ spinlock_t lock; /* protect internal members */ #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_entry; /* proc entry */ @@ -254,20 +255,20 @@ static inline void atm_return(struct atm_vcc *vcc,int truesize) static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size) { - return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) < + return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) < sk_atm(vcc)->sk_sndbuf; } static inline void atm_dev_hold(struct atm_dev *dev) { - atomic_inc(&dev->refcnt); + refcount_inc(&dev->refcnt); } static inline void atm_dev_put(struct atm_dev *dev) { - if (atomic_dec_and_test(&dev->refcnt)) { + if (refcount_dec_and_test(&dev->refcnt)) { BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); if (dev->ops->dev_close) dev->ops->dev_close(dev); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h new file mode 100644 index 000000000000..c893b9520a67 --- /dev/null +++ b/include/linux/avf/virtchnl.h @@ -0,0 +1,701 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _VIRTCHNL_H_ +#define _VIRTCHNL_H_ + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the drivers for all devices starting from our 40G product line + * + * Admin queue buffer usage: + * desc->opcode is always aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * The Firmware copies the cookie fields when sending messages between the + * PF and VF, but uses all other fields internally. Due to this limitation, + * we must send all messages as "indirect", i.e. using an external buffer. + * + * All the VSI indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. 
+ * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value + * is of status_code type, defined in the shared type.h. + * + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. + */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_NOT_SUPPORTED = -64, +}; + +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. 
+ */ + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, +}; + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + +/* Message descriptions and data structures.*/ + +/* VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct virtchnl_version_info { + u32 major; + u32 minor; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + +#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) +#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) + +/* VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. 
+ */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF offload flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. + */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 + +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ + u32 databuffer_size; + u32 max_pkt_size; + u32 pad1; + u64 dma_ring_addr; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. 
*/ + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + +struct virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + u32 pad; + struct virtchnl_queue_pair_info qpair[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + +/* VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); + +struct virtchnl_irq_map_info { + u16 num_vectors; + struct virtchnl_vector_map vecmap[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); + +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + +/* VIRTCHNL_OP_ADD_ETH_ADDR + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* VIRTCHNL_OP_DEL_ETH_ADDR + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + +/* VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct eth_stats in an external buffer. 
+ */ + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + +struct virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table*/ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + */ +struct virtchnl_rss_hena { + u64 hena; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; + +#define PF_EVENT_SEVERITY_INFO 0 +#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct virtchnl_pf_event { + enum virtchnl_event_codes event; + union { + struct { + enum virtchnl_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); + +/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. + */ + +struct virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + +struct virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct virtchnl_iwarp_qv_info qv_info[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. 
+ */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. */ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. 
+ */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_ERR_PARAM; + } + /* few more checks */ + if ((valid_len != msglen) || (err_msg_format)) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index c970a25d2a49..360c082e885c 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -7,6 +7,7 @@ struct sock; struct cgroup; struct sk_buff; +struct bpf_sock_ops_kern; #ifdef CONFIG_CGROUP_BPF @@ -42,6 +43,10 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, int __cgroup_bpf_run_filter_sk(struct sock *sk, enum bpf_attach_type type); +int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, + struct bpf_sock_ops_kern *sock_ops, + enum bpf_attach_type type); + /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ @@ -75,6 +80,18 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk, __ret; \ }) +#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && (sock_ops)->sk) { \ + typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ + if (sk_fullsock(__sk)) \ + __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ + sock_ops, \ + BPF_CGROUP_SOCK_OPS); \ + } \ + __ret; \ +}) #else struct cgroup_bpf {}; @@ -85,6 +102,7 @@ static inline void cgroup_bpf_inherit(struct cgroup *cgrp, #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #endif /* CONFIG_CGROUP_BPF */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6bb38d76faf4..b69e7a5869ff 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -36,6 +36,7 @@ struct bpf_map_ops { int fd); void (*map_fd_put_ptr)(void *ptr); u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); + u32 (*map_fd_sys_lookup_elem)(void *ptr); }; struct bpf_map { @@ -46,6 +47,7 @@ struct bpf_map { u32 max_entries; u32 map_flags; u32 pages; + u32 id; struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; @@ -148,6 +150,20 @@ enum bpf_reg_type { struct bpf_prog; +/* The information passed from prog-specific *_is_valid_access + * back to the verifier. 
+ */ +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + int ctx_field_size; +}; + +static inline void +bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) +{ + aux->ctx_field_size = size; +} + struct bpf_verifier_ops { /* return eBPF function prototype for verification */ const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); @@ -156,13 +172,13 @@ struct bpf_verifier_ops { * with 'type' (read or write) is allowed */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type, - enum bpf_reg_type *reg_type); + struct bpf_insn_access_aux *info); int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); u32 (*convert_ctx_access)(enum bpf_access_type type, const struct bpf_insn *src, struct bpf_insn *dst, - struct bpf_prog *prog); + struct bpf_prog *prog, u32 *target_size); int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); }; @@ -171,6 +187,8 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + u32 stack_depth; + u32 id; struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_verifier_ops *ops; @@ -276,9 +294,11 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); +int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); void bpf_fd_array_map_clear(struct bpf_map *map); int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); +int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 03bf223f18be..3d137c33d664 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -10,6 +10,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops) #endif #ifdef CONFIG_BPF_EVENTS BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index d5093b52b485..621076f56251 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -73,6 +73,8 @@ struct bpf_insn_aux_data { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ }; + int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ + int converted_op_size; /* the valid value width after perceived conversion */ }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 62d948f80730..f1fc9baa3509 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -57,6 +57,9 @@ struct bpf_prog_aux; #define BPF_REG_AX MAX_BPF_REG #define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) +/* unused opcode to mark special call to bpf_tail_call() helper */ +#define BPF_TAIL_CALL 0xf0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. 
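The bpf.h hunk above threads a struct bpf_insn_access_aux through ->is_valid_access() so a program type can report the full width of a context field; the verifier records it (ctx_field_size/converted_op_size in bpf_verifier.h) and can then accept and rewrite loads narrower than the field. A minimal sketch of a callback using the new signature; the program type and the 4-byte field at offset 0 are hypothetical:

static bool demo_is_valid_access(int off, int size, enum bpf_access_type type,
				 struct bpf_insn_access_aux *info)
{
	/* hypothetical context layout: one 4-byte read-only field at offset 0 */
	if (off == 0 && type == BPF_READ && size <= 4) {
		/* tell the verifier the underlying field is 4 bytes wide, so
		 * it can validate and convert a narrower (1- or 2-byte) load
		 */
		bpf_ctx_record_field_size(info, 4);
		return true;
	}
	return false;
}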
@@ -66,8 +69,6 @@ struct bpf_prog_aux; /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 -#define BPF_TAG_SIZE 8 - /* Helper macros for filter block array initializers. */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ @@ -336,6 +337,22 @@ struct bpf_prog_aux; bpf_size; \ }) +#define bpf_size_to_bytes(bpf_size) \ +({ \ + int bytes = -EINVAL; \ + \ + if (bpf_size == BPF_B) \ + bytes = sizeof(u8); \ + else if (bpf_size == BPF_H) \ + bytes = sizeof(u16); \ + else if (bpf_size == BPF_W) \ + bytes = sizeof(u32); \ + else if (bpf_size == BPF_DW) \ + bytes = sizeof(u64); \ + \ + bytes; \ +}) + #define BPF_SIZEOF(type) \ ({ \ const int __size = bytes_to_bpf_size(sizeof(type)); \ @@ -350,6 +367,13 @@ struct bpf_prog_aux; __size; \ }) +#define BPF_LDST_BYTES(insn) \ + ({ \ + const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \ + WARN_ON(__size < 0); \ + __size; \ + }) + #define __BPF_MAP_0(m, v, ...) v #define __BPF_MAP_1(m, v, t, a, ...) m(t, a) #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__) @@ -400,6 +424,18 @@ struct bpf_prog_aux; #define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__) #define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__) +#define bpf_ctx_range(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 +#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ + offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 + +#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ + ({ \ + BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \ + *(PTR_SIZE) = (SIZE); \ + offsetof(TYPE, MEMBER); \ + }) + #ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { @@ -429,6 +465,7 @@ struct bpf_prog { kmemcheck_bitfield_end(meta); enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ + u32 jited_len; /* Size of jited insns in bytes */ u8 tag[BPF_TAG_SIZE]; struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ @@ -562,6 +599,18 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) return prog->type == BPF_PROG_TYPE_UNSPEC; } +static inline bool +bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default) +{ + bool off_ok; +#ifdef __LITTLE_ENDIAN + off_ok = (off & (size_default - 1)) == 0; +#else + off_ok = (off & (size_default - 1)) + size == size_default; +#endif + return off_ok && size <= size_default && (size & (size - 1)) == 0; +} + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) #ifdef CONFIG_ARCH_HAS_SET_MEMORY @@ -896,4 +945,13 @@ static inline int bpf_tell_extensions(void) return SKF_AD_MAX; } +struct bpf_sock_ops_kern { + struct sock *sk; + u32 op; + union { + u32 reply; + u32 replylong[4]; + }; +}; + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 69033353d0d1..55a604ad459f 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2113,10 +2113,43 @@ enum ieee80211_key_len { #define PMK_MAX_LEN 48 -/* Public action codes */ +/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */ enum ieee80211_pub_actioncode { + WLAN_PUB_ACTION_20_40_BSS_COEX = 0, + WLAN_PUB_ACTION_DSE_ENABLEMENT = 1, + WLAN_PUB_ACTION_DSE_DEENABLEMENT = 2, + WLAN_PUB_ACTION_DSE_REG_LOC_ANN = 3, WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, + WLAN_PUB_ACTION_DSE_MSMT_REQ = 5, + WLAN_PUB_ACTION_DSE_MSMT_RESP = 6, + 
WLAN_PUB_ACTION_MSMT_PILOT = 7, + WLAN_PUB_ACTION_DSE_PC = 8, + WLAN_PUB_ACTION_VENDOR_SPECIFIC = 9, + WLAN_PUB_ACTION_GAS_INITIAL_REQ = 10, + WLAN_PUB_ACTION_GAS_INITIAL_RESP = 11, + WLAN_PUB_ACTION_GAS_COMEBACK_REQ = 12, + WLAN_PUB_ACTION_GAS_COMEBACK_RESP = 13, WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14, + WLAN_PUB_ACTION_LOC_TRACK_NOTI = 15, + WLAN_PUB_ACTION_QAB_REQUEST_FRAME = 16, + WLAN_PUB_ACTION_QAB_RESPONSE_FRAME = 17, + WLAN_PUB_ACTION_QMF_POLICY = 18, + WLAN_PUB_ACTION_QMF_POLICY_CHANGE = 19, + WLAN_PUB_ACTION_QLOAD_REQUEST = 20, + WLAN_PUB_ACTION_QLOAD_REPORT = 21, + WLAN_PUB_ACTION_HCCA_TXOP_ADVERT = 22, + WLAN_PUB_ACTION_HCCA_TXOP_RESPONSE = 23, + WLAN_PUB_ACTION_PUBLIC_KEY = 24, + WLAN_PUB_ACTION_CHANNEL_AVAIL_QUERY = 25, + WLAN_PUB_ACTION_CHANNEL_SCHEDULE_MGMT = 26, + WLAN_PUB_ACTION_CONTACT_VERI_SIGNAL = 27, + WLAN_PUB_ACTION_GDD_ENABLEMENT_REQ = 28, + WLAN_PUB_ACTION_GDD_ENABLEMENT_RESP = 29, + WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30, + WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31, + WLAN_PUB_ACTION_FTM_REQUEST = 32, + WLAN_PUB_ACTION_FTM = 33, + WLAN_PUB_ACTION_FILS_DISCOVERY = 34, }; /* TDLS action codes */ @@ -2400,7 +2433,11 @@ enum ieee80211_sa_query_action { #define WLAN_MAX_KEY_LEN 32 +#define WLAN_PMK_NAME_LEN 16 #define WLAN_PMKID_LEN 16 +#define WLAN_PMK_LEN_EAP_LEAP 16 +#define WLAN_PMK_LEN 32 +#define WLAN_PMK_LEN_SUITE_B_192 48 #define WLAN_OUI_WFA 0x506f9a #define WLAN_OUI_TYPE_WFA_P2P 9 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 0c16866a7aac..3cd18ac0697f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -62,6 +62,7 @@ int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); +bool br_multicast_enabled(const struct net_device *dev); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -78,6 +79,19 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, { return false; } +static inline bool br_multicast_enabled(const struct net_device *dev) +{ + return false; +} +#endif + +#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) +bool br_vlan_enabled(const struct net_device *dev); +#else +static inline bool br_vlan_enabled(const struct net_device *dev) +{ + return false; +} #endif #endif diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3482c3c2037d..4837157da0dc 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -3,6 +3,7 @@ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); +struct skb_array *tap_get_skb_array(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> @@ -12,6 +13,10 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } +static inline struct skb_array *tap_get_skb_array(struct file *f) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TAP */ #include <net/sock.h> diff --git a/include/linux/if_team.h b/include/linux/if_team.h index c05216a8fbac..30294603526f 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -298,4 +298,6 @@ extern void team_mode_unregister(const struct team_mode *mode); #define TEAM_DEFAULT_NUM_TX_QUEUES 16 #define TEAM_DEFAULT_NUM_RX_QUEUES 16 +#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind) + #endif /* _LINUX_IF_TEAM_H_ */ diff --git 
a/include/linux/if_tun.h b/include/linux/if_tun.h index ed6da2e6df90..bf9bdf42d577 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -19,6 +19,7 @@ #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); +struct skb_array *tun_get_skb_array(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> @@ -28,5 +29,9 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } +static inline struct skb_array *tun_get_skb_array(struct file *f) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 283dc2f5364d..5e6a2d4dc366 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -318,7 +318,7 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, if (skb_cow_head(skb, VLAN_HLEN) < 0) return -ENOMEM; - veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + veth = skb_push(skb, VLAN_HLEN); /* Move the mac addresses to the beginning of the new header. */ memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 12f6fba6d21a..97caf1821de8 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -18,6 +18,7 @@ #include <linux/skbuff.h> #include <linux/timer.h> #include <linux/in.h> +#include <linux/refcount.h> #include <uapi/linux/igmp.h> static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) @@ -84,7 +85,7 @@ struct ip_mc_list { struct ip_mc_list __rcu *next_hash; struct timer_list timer; int users; - atomic_t refcnt; + refcount_t refcnt; spinlock_t lock; char tm_running; char reporter; diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index a2e9d6ea1349..fb3f809e34e4 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -11,6 +11,7 @@ #include <linux/timer.h> #include <linux/sysctl.h> #include <linux/rtnetlink.h> +#include <linux/refcount.h> struct ipv4_devconf { void *sysctl; @@ -22,7 +23,7 @@ struct ipv4_devconf { struct in_device { struct net_device *dev; - atomic_t refcnt; + refcount_t refcnt; int dead; struct in_ifaddr *ifa_list; /* IP ifaddr chain */ @@ -150,8 +151,15 @@ struct in_ifaddr { unsigned long ifa_tstamp; /* updated timestamp */ }; +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; +}; + int register_inetaddr_notifier(struct notifier_block *nb); int unregister_inetaddr_notifier(struct notifier_block *nb); +int register_inetaddr_validator_notifier(struct notifier_block *nb); +int unregister_inetaddr_validator_notifier(struct notifier_block *nb); void inet_netconf_notify_devconf(struct net *net, int event, int type, int ifindex, struct ipv4_devconf *devconf); @@ -212,7 +220,7 @@ static inline struct in_device *in_dev_get(const struct net_device *dev) rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) - atomic_inc(&in_dev->refcnt); + refcount_inc(&in_dev->refcnt); rcu_read_unlock(); return in_dev; } @@ -233,12 +241,12 @@ void in_dev_finish_destroy(struct in_device *idev); static inline void in_dev_put(struct in_device *idev) { - if (atomic_dec_and_test(&idev->refcnt)) + if (refcount_dec_and_test(&idev->refcnt)) in_dev_finish_destroy(idev); } -#define __in_dev_put(idev) atomic_dec(&(idev)->refcnt) -#define in_dev_hold(idev) atomic_inc(&(idev)->refcnt) +#define __in_dev_put(idev) refcount_dec(&(idev)->refcnt) +#define in_dev_hold(idev) refcount_inc(&(idev)->refcnt) #endif /* __KERNEL__ */ diff --git 
a/include/linux/mISDNif.h b/include/linux/mISDNif.h index ac02c54520e9..a7330eb3ec64 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -554,7 +554,7 @@ _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) if (!skb) return NULL; if (len) - memcpy(skb_put(skb, len), dp, len); + skb_put_data(skb, dp, len); hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index f541da68d1e7..472fa4d4ea62 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,6 +37,8 @@ #define PHY_ID_KSZ8795 0x00221550 +#define PHY_ID_KSZ9477 0x00221631 + /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 #define MICREL_PHY_FXEN 0x00000002 diff --git a/include/linux/mii.h b/include/linux/mii.h index 1629a0c32679..e870bfa6abfe 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -31,7 +31,7 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); -extern int mii_ethtool_get_link_ksettings( +extern void mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern int mii_ethtool_set_link_ksettings( diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index a940ec6a046c..f31a0b5377e1 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -300,6 +300,8 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, + + MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, }; enum { @@ -973,6 +975,7 @@ enum mlx5_cap_type { MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, + MLX5_CAP_FPGA, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1091,9 +1094,18 @@ enum mlx5_mcam_feature_groups { #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) +#define MLX5_CAP_MCAM_REG(mdev, reg) \ + MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg) + #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) +#define MLX5_CAP_FPGA(mdev, cap) \ + MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) + +#define MLX5_CAP64_FPGA(mdev, cap) \ + MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 93273d9ea4d1..df6ce59a1f95 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -44,6 +44,7 @@ #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> +#include <linux/idr.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> @@ -108,6 +109,9 @@ enum { MLX5_REG_QTCT = 0x400a, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, + MLX5_REG_FPGA_CAP = 0x4022, + MLX5_REG_FPGA_CTRL = 0x4023, + MLX5_REG_FPGA_ACCESS_REG = 0x4024, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -129,6 +133,9 @@ enum { MLX5_REG_MPCNT = 0x9051, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MCQI = 0x9061, + MLX5_REG_MCC = 0x9062, + MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, }; @@ -732,6 +739,14 @@ struct mlx5e_resources { struct mlx5_sq_bfreg bfreg; }; 
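On the mISDNif.h hunk above, which replaces the memcpy(skb_put(...)) idiom: skb_put_data() bundles the reserve-then-copy steps into one helper. A sketch of the equivalent behaviour, assuming the <linux/skbuff.h> helper introduced in this same series (where skb_put() already returns void *, as the if_vlan.h hunk relies on):

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);	/* grow the skb tail by len bytes */

	memcpy(tmp, data, len);		/* copy the payload into place */
	return tmp;
}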
+#define MLX5_MAX_RESERVED_GIDS 8 + +struct mlx5_rsvd_gids { + unsigned int start; + unsigned int count; + struct ida ida; +}; + struct mlx5_core_dev { struct pci_dev *pdev; /* sync pci state */ @@ -761,6 +776,13 @@ struct mlx5_core_dev { atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; + struct { + struct mlx5_rsvd_gids reserved_gids; + atomic_t roce_en; + } roce; +#ifdef CONFIG_MLX5_FPGA + struct mlx5_fpga_device *fpga; +#endif #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif @@ -812,6 +834,7 @@ struct mlx5_cmd_work_ent { u64 ts1; u64 ts2; u16 op; + bool polling; }; struct mlx5_pas { @@ -895,11 +918,6 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; } -static inline void *mlx5_vzalloc(unsigned long size) -{ - return kvzalloc(size, GFP_KERNEL); -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; @@ -915,6 +933,8 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context); +int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); @@ -925,6 +945,8 @@ int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); +void mlx5_trigger_health_work(struct mlx5_core_dev *dev); +void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); @@ -1038,6 +1060,11 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); +unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); +int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, + u8 roce_version, u8 roce_l3_type, const u8 *gid, + const u8 *mac, bool vlan, u16 vlan_id); + static inline int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index edafedb7b509..87869c04849a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -32,6 +32,8 @@ #ifndef MLX5_IFC_H #define MLX5_IFC_H +#include "mlx5_ifc_fpga.h" + enum { MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, @@ -56,7 +58,8 @@ enum { MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, - MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb + MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb, + MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20, }; enum { @@ -229,6 +232,11 @@ enum { MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, + MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, + MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, + MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, + MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, + 
MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, MLX5_CMD_OP_MAX }; @@ -240,7 +248,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_first_prio[0x1]; u8 outer_first_cfi[0x1]; u8 outer_first_vid[0x1]; - u8 reserved_at_7[0x1]; + u8 outer_ipv4_ttl[0x1]; u8 outer_second_prio[0x1]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0x1]; @@ -377,7 +385,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_at_c0[0x20]; + u8 reserved_at_c0[0x18]; + u8 ttl_hoplimit[0x8]; u8 udp_sport[0x10]; u8 udp_dport[0x10]; @@ -596,7 +605,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 tunnel_statless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; - u8 reserved_at_20[0x20]; + u8 swp[0x1]; + u8 swp_csum[0x1]; + u8 swp_lso[0x1]; + u8 reserved_at_23[0x1d]; u8 reserved_at_40[0x10]; u8 lro_min_mss_size[0x10]; @@ -658,9 +670,9 @@ enum { struct mlx5_ifc_atomic_caps_bits { u8 reserved_at_0[0x40]; - u8 atomic_req_8B_endianess_mode[0x2]; + u8 atomic_req_8B_endianness_mode[0x2]; u8 reserved_at_42[0x4]; - u8 supported_atomic_req_8B_endianess_mode_1[0x1]; + u8 supported_atomic_req_8B_endianness_mode_1[0x1]; u8 reserved_at_47[0x19]; @@ -798,7 +810,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_indirection[0x8]; u8 fixed_buffer_size[0x1]; u8 log_max_mrw_sz[0x7]; - u8 reserved_at_110[0x2]; + u8 force_teardown[0x1]; + u8 reserved_at_111[0x1]; u8 log_max_bsf_list_size[0x6]; u8 umr_extended_translation_offset[0x1]; u8 null_mkey[0x1]; @@ -819,7 +832,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cc_modify_allowed[0x1]; u8 start_pad[0x1]; u8 cache_line_128byte[0x1]; - u8 reserved_at_163[0xb]; + u8 reserved_at_165[0xb]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; @@ -860,7 +873,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_tc[0x4]; u8 reserved_at_1d0[0x1]; u8 dcbx[0x1]; - u8 reserved_at_1d2[0x4]; + u8 reserved_at_1d2[0x3]; + u8 fpga[0x1]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_at_1d8[0x1]; @@ -2194,6 +2208,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; + struct mlx5_ifc_fpga_cap_bits fpga_cap; u8 reserved_at_0[0x8000]; }; @@ -2426,7 +2441,8 @@ struct mlx5_ifc_sqc_bits { u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; u8 reg_umr[0x1]; - u8 reserved_at_d[0x13]; + u8 allow_swp[0x1]; + u8 reserved_at_e[0x12]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -3089,18 +3105,25 @@ struct mlx5_ifc_tsar_element_bits { u8 reserved_at_10[0x10]; }; +enum { + MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, + MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, +}; + struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x3f]; + + u8 force_state[0x1]; }; enum { MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, - MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1, + MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, }; struct mlx5_ifc_teardown_hca_in_bits { @@ -4606,6 +4629,7 @@ enum { MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14, MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15, MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, + MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { @@ -6622,6 +6646,24 @@ struct mlx5_ifc_create_flow_table_out_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_flow_table_context_bits { + u8 encap_en[0x1]; + u8 decap_en[0x1]; + u8 reserved_at_2[0x2]; + u8 table_miss_action[0x4]; + u8 level[0x8]; + u8 
reserved_at_10[0x8]; + u8 log_size[0x8]; + + u8 reserved_at_20[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_at_40[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 reserved_at_60[0xe0]; +}; + struct mlx5_ifc_create_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -6640,21 +6682,7 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_at_a0[0x20]; - u8 encap_en[0x1]; - u8 decap_en[0x1]; - u8 reserved_at_c2[0x2]; - u8 table_miss_mode[0x4]; - u8 level[0x8]; - u8 reserved_at_d0[0x8]; - u8 log_size[0x8]; - - u8 reserved_at_e0[0x8]; - u8 table_miss_id[0x18]; - - u8 reserved_at_100[0x8]; - u8 lag_master_next_table_id[0x18]; - - u8 reserved_at_120[0x80]; + struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_create_flow_group_out_bits { @@ -7286,7 +7314,8 @@ struct mlx5_ifc_ptys_reg_bits { u8 ib_link_width_oper[0x10]; u8 ib_proto_oper[0x10]; - u8 reserved_at_160[0x20]; + u8 reserved_at_160[0x1c]; + u8 connector_type[0x4]; u8 eth_proto_lp_advertise[0x20]; @@ -7689,8 +7718,10 @@ struct mlx5_ifc_peir_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x7e]; + u8 reserved_at_0[0x7c]; + u8 ptys_connector_type[0x1]; + u8 reserved_at_7d[0x1]; u8 ppcnt_discard_group[0x1]; u8 ppcnt_statistical_group[0x1]; }; @@ -7723,6 +7754,18 @@ struct mlx5_ifc_mcam_enhanced_features_bits { u8 pcie_performance_group[0x1]; }; +struct mlx5_ifc_mcam_access_reg_bits { + u8 reserved_at_0[0x1c]; + u8 mcda[0x1]; + u8 mcc[0x1]; + u8 mcqi[0x1]; + u8 reserved_at_1f[0x1]; + + u8 regs_95_to_64[0x20]; + u8 regs_63_to_32[0x20]; + u8 regs_31_to_0[0x20]; +}; + struct mlx5_ifc_mcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; @@ -7732,6 +7775,7 @@ struct mlx5_ifc_mcam_reg_bits { u8 reserved_at_20[0x20]; union { + struct mlx5_ifc_mcam_access_reg_bits access_regs; u8 reserved_at_0[0x80]; } mng_access_reg_cap_mask; @@ -8143,6 +8187,85 @@ struct mlx5_ifc_mtppse_reg_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_mcqi_cap_bits { + u8 supported_info_bitmask[0x20]; + + u8 component_size[0x20]; + + u8 max_component_size[0x20]; + + u8 log_mcda_word_size[0x4]; + u8 reserved_at_64[0xc]; + u8 mcda_max_write_size[0x10]; + + u8 rd_en[0x1]; + u8 reserved_at_81[0x1]; + u8 match_chip_id[0x1]; + u8 match_psid[0x1]; + u8 check_user_timestamp[0x1]; + u8 match_base_guid_mac[0x1]; + u8 reserved_at_86[0x1a]; +}; + +struct mlx5_ifc_mcqi_reg_bits { + u8 read_pending_component[0x1]; + u8 reserved_at_1[0xf]; + u8 component_index[0x10]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x1b]; + u8 info_type[0x5]; + + u8 info_size[0x20]; + + u8 offset[0x20]; + + u8 reserved_at_a0[0x10]; + u8 data_size[0x10]; + + u8 data[0][0x20]; +}; + +struct mlx5_ifc_mcc_reg_bits { + u8 reserved_at_0[0x4]; + u8 time_elapsed_since_last_cmd[0xc]; + u8 reserved_at_10[0x8]; + u8 instruction[0x8]; + + u8 reserved_at_20[0x10]; + u8 component_index[0x10]; + + u8 reserved_at_40[0x8]; + u8 update_handle[0x18]; + + u8 handle_owner_type[0x4]; + u8 handle_owner_host_id[0x4]; + u8 reserved_at_68[0x1]; + u8 control_progress[0x7]; + u8 error_code[0x8]; + u8 reserved_at_78[0x4]; + u8 control_state[0x4]; + + u8 component_size[0x20]; + + u8 reserved_at_a0[0x60]; +}; + +struct mlx5_ifc_mcda_reg_bits { + u8 reserved_at_0[0x8]; + u8 update_handle[0x18]; + + u8 offset[0x20]; + + u8 reserved_at_40[0x10]; + u8 size[0x10]; + + u8 reserved_at_60[0x20]; + + u8 data[0][0x20]; +}; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct 
mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -8190,6 +8313,12 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_sltp_reg_bits sltp_reg; struct mlx5_ifc_mtpps_reg_bits mtpps_reg; struct mlx5_ifc_mtppse_reg_bits mtppse_reg; + struct mlx5_ifc_fpga_access_reg_bits fpga_access_reg; + struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits; + struct mlx5_ifc_fpga_cap_bits fpga_cap_bits; + struct mlx5_ifc_mcqi_reg_bits mcqi_reg; + struct mlx5_ifc_mcc_reg_bits mcc_reg; + struct mlx5_ifc_mcda_reg_bits mcda_reg; u8 reserved_at_0[0x60e0]; }; @@ -8270,17 +8399,7 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_at_c0[0x4]; - u8 table_miss_mode[0x4]; - u8 reserved_at_c8[0x18]; - - u8 reserved_at_e0[0x8]; - u8 table_miss_id[0x18]; - - u8 reserved_at_100[0x8]; - u8 lag_master_next_table_id[0x18]; - - u8 reserved_at_120[0x80]; + struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_ets_tcn_config_reg_bits { diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h new file mode 100644 index 000000000000..255a88d08078 --- /dev/null +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -0,0 +1,432 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef MLX5_IFC_FPGA_H +#define MLX5_IFC_FPGA_H + +enum { + MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9, +}; + +enum { + MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, +}; + +struct mlx5_ifc_fpga_shell_caps_bits { + u8 max_num_qps[0x10]; + u8 reserved_at_10[0x8]; + u8 total_rcv_credits[0x8]; + + u8 reserved_at_20[0xe]; + u8 qp_type[0x2]; + u8 reserved_at_30[0x5]; + u8 rae[0x1]; + u8 rwe[0x1]; + u8 rre[0x1]; + u8 reserved_at_38[0x4]; + u8 dc[0x1]; + u8 ud[0x1]; + u8 uc[0x1]; + u8 rc[0x1]; + + u8 reserved_at_40[0x1a]; + u8 log_ddr_size[0x6]; + + u8 max_fpga_qp_msg_size[0x20]; + + u8 reserved_at_80[0x180]; +}; + +struct mlx5_ifc_fpga_cap_bits { + u8 fpga_id[0x8]; + u8 fpga_device[0x18]; + + u8 register_file_ver[0x20]; + + u8 fpga_ctrl_modify[0x1]; + u8 reserved_at_41[0x5]; + u8 access_reg_query_mode[0x2]; + u8 reserved_at_48[0x6]; + u8 access_reg_modify_mode[0x2]; + u8 reserved_at_50[0x10]; + + u8 reserved_at_60[0x20]; + + u8 image_version[0x20]; + + u8 image_date[0x20]; + + u8 image_time[0x20]; + + u8 shell_version[0x20]; + + u8 reserved_at_100[0x80]; + + struct mlx5_ifc_fpga_shell_caps_bits shell_caps; + + u8 reserved_at_380[0x8]; + u8 ieee_vendor_id[0x18]; + + u8 sandbox_product_version[0x10]; + u8 sandbox_product_id[0x10]; + + u8 sandbox_basic_caps[0x20]; + + u8 reserved_at_3e0[0x10]; + u8 sandbox_extended_caps_len[0x10]; + + u8 sandbox_extended_caps_addr[0x40]; + + u8 fpga_ddr_start_addr[0x40]; + + u8 fpga_cr_space_start_addr[0x40]; + + u8 fpga_ddr_size[0x20]; + + u8 fpga_cr_space_size[0x20]; + + u8 reserved_at_500[0x300]; +}; + +enum { + MLX5_FPGA_CTRL_OPERATION_LOAD = 0x1, + MLX5_FPGA_CTRL_OPERATION_RESET = 0x2, + MLX5_FPGA_CTRL_OPERATION_FLASH_SELECT = 0x3, + MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON = 0x4, + MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF = 0x5, + MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX = 0x6, +}; + +struct mlx5_ifc_fpga_ctrl_bits { + u8 reserved_at_0[0x8]; + u8 operation[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 reserved_at_20[0x8]; + u8 flash_select_admin[0x8]; + u8 reserved_at_30[0x8]; + u8 flash_select_oper[0x8]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1, + MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2, + MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3, + MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4, + MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5, + MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6, + MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7, +}; + +struct mlx5_ifc_fpga_error_event_bits { + u8 reserved_at_0[0x40]; + + u8 reserved_at_40[0x18]; + u8 syndrome[0x8]; + + u8 reserved_at_60[0x80]; +}; + +#define MLX5_FPGA_ACCESS_REG_SIZE_MAX 64 + +struct mlx5_ifc_fpga_access_reg_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x10]; + u8 size[0x10]; + + u8 address[0x40]; + + u8 data[0][0x8]; +}; + +enum mlx5_ifc_fpga_qp_state { + MLX5_FPGA_QPC_STATE_INIT = 0x0, + MLX5_FPGA_QPC_STATE_ACTIVE = 0x1, + MLX5_FPGA_QPC_STATE_ERROR = 0x2, +}; + +enum mlx5_ifc_fpga_qp_type { + MLX5_FPGA_QPC_QP_TYPE_SHELL_QP = 0x0, + MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP = 0x1, +}; + +enum mlx5_ifc_fpga_qp_service_type { + MLX5_FPGA_QPC_ST_RC = 0x0, +}; + +struct mlx5_ifc_fpga_qpc_bits { + u8 state[0x4]; + u8 reserved_at_4[0x1b]; + u8 qp_type[0x1]; + + u8 reserved_at_20[0x4]; + u8 st[0x4]; + u8 reserved_at_28[0x10]; + u8 traffic_class[0x8]; + + u8 ether_type[0x10]; + u8 prio[0x3]; + u8 dei[0x1]; + u8 vid[0xc]; + + u8 reserved_at_60[0x20]; + + u8 reserved_at_80[0x8]; + u8 
next_rcv_psn[0x18]; + + u8 reserved_at_a0[0x8]; + u8 next_send_psn[0x18]; + + u8 reserved_at_c0[0x10]; + u8 pkey[0x10]; + + u8 reserved_at_e0[0x8]; + u8 remote_qpn[0x18]; + + u8 reserved_at_100[0x15]; + u8 rnr_retry[0x3]; + u8 reserved_at_118[0x5]; + u8 retry_count[0x3]; + + u8 reserved_at_120[0x20]; + + u8 reserved_at_140[0x10]; + u8 remote_mac_47_32[0x10]; + + u8 remote_mac_31_0[0x20]; + + u8 remote_ip[16][0x8]; + + u8 reserved_at_200[0x40]; + + u8 reserved_at_240[0x10]; + u8 fpga_mac_47_32[0x10]; + + u8 fpga_mac_31_0[0x20]; + + u8 fpga_ip[16][0x8]; +}; + +struct mlx5_ifc_fpga_create_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_create_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_modify_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 field_select[0x20]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_modify_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_fpga_query_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_fpga_query_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_query_qp_counters_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 clear[0x1]; + u8 reserved_at_41[0x7]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_fpga_query_qp_counters_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 rx_ack_packets[0x40]; + + u8 rx_send_packets[0x40]; + + u8 tx_ack_packets[0x40]; + + u8 tx_send_packets[0x40]; + + u8 rx_total_drop[0x40]; + + u8 reserved_at_1c0[0x1c0]; +}; + +struct mlx5_ifc_fpga_destroy_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_fpga_destroy_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_ipsec_extended_cap_bits { + u8 encapsulation[0x20]; + + u8 reserved_0[0x15]; + u8 ipv4_fragment[0x1]; + u8 ipv6[0x1]; + u8 esn[0x1]; + u8 lso[0x1]; + u8 transport_and_tunnel_mode[0x1]; + u8 tunnel_mode[0x1]; + u8 transport_mode[0x1]; + u8 ah_esp[0x1]; + u8 esp[0x1]; + u8 ah[0x1]; + u8 ipv4_options[0x1]; + + u8 auth_alg[0x20]; + + u8 enc_alg[0x20]; + + u8 sa_cap[0x20]; + + u8 reserved_1[0x10]; + u8 number_of_ipsec_counters[0x10]; + + u8 ipsec_counters_addr_low[0x20]; + u8 ipsec_counters_addr_high[0x20]; +}; + +struct mlx5_ifc_ipsec_counters_bits { + u8 dec_in_packets[0x40]; + + u8 dec_out_packets[0x40]; + + u8 dec_bypass_packets[0x40]; + + u8 enc_in_packets[0x40]; + + u8 enc_out_packets[0x40]; + + u8 enc_bypass_packets[0x40]; + + u8 
drop_dec_packets[0x40]; + + u8 failed_auth_dec_packets[0x40]; + + u8 drop_enc_packets[0x40]; + + u8 success_add_sa[0x40]; + + u8 fail_add_sa[0x40]; + + u8 success_delete_sa[0x40]; + + u8 fail_delete_sa[0x40]; + + u8 dropped_cmd[0x40]; +}; + +#endif /* MLX5_IFC_FPGA_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index e527732fb31b..c57d4b7de3a8 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -92,6 +92,19 @@ enum mlx5e_link_mode { MLX5E_LINK_MODES_NUMBER, }; +enum mlx5e_connector_type { + MLX5E_PORT_UNKNOWN = 0, + MLX5E_PORT_NONE = 1, + MLX5E_PORT_TP = 2, + MLX5E_PORT_AUI = 3, + MLX5E_PORT_BNC = 4, + MLX5E_PORT_MII = 5, + MLX5E_PORT_FIBRE = 6, + MLX5E_PORT_DA = 7, + MLX5E_PORT_OTHER = 8, + MLX5E_CONNECTOR_TYPE_NUMBER, +}; + #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) #define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index bef80d0a0e30..6f41270d80c0 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -225,10 +225,20 @@ enum { MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, }; +enum { + MLX5_ETH_WQE_SWP_INNER_L3_IPV6 = 1 << 0, + MLX5_ETH_WQE_SWP_INNER_L4_UDP = 1 << 1, + MLX5_ETH_WQE_SWP_OUTER_L3_IPV6 = 1 << 4, + MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5, +}; + struct mlx5_wqe_eth_seg { - u8 rsvd0[4]; + u8 swp_outer_l4_offset; + u8 swp_outer_l3_offset; + u8 swp_inner_l4_offset; + u8 swp_inner_l3_offset; u8 cs_flags; - u8 rsvd1; + u8 swp_flags; __be16 mss; __be32 rsvd2; union { @@ -569,8 +579,6 @@ int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, int reset, void *out, int out_size); -int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, - u32 *out_of_buffer); static inline const char *mlx5_qp_type_str(int type) { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 24e88b33a06c..e48ee2eaaa3e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -807,8 +807,10 @@ enum xdp_netdev_command { * when it is no longer used. */ XDP_SETUP_PROG, + XDP_SETUP_PROG_HW, /* Check if a bpf program is set on the device. The callee should - * return true if a program is currently attached and running. + * set @prog_attached to one of XDP_ATTACHED_* values, note that "true" + * is equivalent to XDP_ATTACHED_DRV. */ XDP_QUERY_PROG, }; @@ -820,11 +822,15 @@ struct netdev_xdp { union { /* XDP_SETUP_PROG */ struct { + u32 flags; struct bpf_prog *prog; struct netlink_ext_ack *extack; }; /* XDP_QUERY_PROG */ - bool prog_attached; + struct { + u8 prog_attached; + u32 prog_id; + }; }; }; @@ -971,7 +977,7 @@ struct xfrmdev_ops { * with PF and querying it may introduce a theoretical security risk. * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, + * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index, * __be16 protocol, struct tc_to_netdev *tc); * Called to setup any 'tc' scheduler, classifier or action on @dev. * This is always called from the stack with the rtnl lock held and netif @@ -1108,12 +1114,6 @@ struct xfrmdev_ops { * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing * the station and priv is the structure returned by the add * operation. 
- * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb, - * struct net_device *dev, - * void *priv); - * Callback to use for xmit over the accelerated station. This - * is used in place of ndo_start_xmit on accelerated net - * devices. * int (*ndo_set_tx_maxrate)(struct net_device *dev, * int queue_index, u32 maxrate); * Called when a user wants to set a max-rate limitation of specific @@ -1221,7 +1221,7 @@ struct net_device_ops { struct net_device *dev, int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, - u32 handle, + u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc); #if IS_ENABLED(CONFIG_FCOE) @@ -1310,9 +1310,6 @@ struct net_device_ops { void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv); - netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, - struct net_device *dev, - void *priv); int (*ndo_get_lock_subclass)(struct net_device *dev); int (*ndo_set_tx_maxrate)(struct net_device *dev, int queue_index, @@ -1824,7 +1821,7 @@ struct net_device { #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); #endif - unsigned long tx_queue_len; + unsigned int tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; @@ -2457,6 +2454,7 @@ static inline int dev_recursion_level(void) struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); @@ -2574,9 +2572,7 @@ static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ __ret = __skb_gro_checksum_validate_complete(skb, \ compute_pseudo(skb, proto)); \ - if (__ret) \ - __skb_mark_checksum_bad(skb); \ - else \ + if (!__ret) \ skb_gro_incr_csum_unnecessary(skb); \ __ret; \ }) @@ -3304,7 +3300,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); -bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op); +u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); @@ -3932,6 +3928,10 @@ void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); +int skb_crc32c_csum_help(struct sk_buff *skb); +int skb_csum_hwoffload_help(struct sk_buff *skb, + const netdev_features_t features); + struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 996711d8a7b4..41d04e9d088a 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -1,7 +1,6 @@ #ifndef _NFNETLINK_H #define _NFNETLINK_H - #include <linux/netlink.h> #include <linux/capability.h> #include <net/netlink.h> @@ -10,13 +9,16 @@ struct nfnl_callback { int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct 
nlmsghdr *nlh, - const struct nlattr * const cda[]); + const struct nlattr * const cda[], + struct netlink_ext_ack *extack); int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, - const struct nlattr * const cda[]); + const struct nlattr * const cda[], + struct netlink_ext_ack *extack); int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, - const struct nlattr * const cda[]); + const struct nlattr * const cda[], + struct netlink_ext_ack *extack); const struct nla_policy *policy; /* netlink attribute policy */ const u_int16_t attr_count; /* number of nlattr's */ }; diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index e0cbf17af780..2c2a5514b0df 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -122,8 +122,6 @@ extern unsigned int ebt_do_table(struct sk_buff *skb, #define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) /* Clear the bit in the hook mask that tells if the rule is on a base chain */ #define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) -/* True if the target is not a standard target */ -#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) static inline bool ebt_invalid_target(int target) { diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 5fff5ba5964e..8664fd26eb5d 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -97,6 +97,21 @@ struct netlink_ext_ack { #define NL_SET_ERR_MSG_MOD(extack, msg) \ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg) +#define NL_SET_BAD_ATTR(extack, attr) do { \ + if ((extack)) \ + (extack)->bad_attr = (attr); \ +} while (0) + +#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ + static const char __msg[] = (msg); \ + struct netlink_ext_ack *__extack = (extack); \ + \ + if (__extack) { \ + __extack->_msg = __msg; \ + __extack->bad_attr = (attr); \ + } \ +} while (0) + extern void netlink_kernel_release(struct sock *sk); extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 1828900c9411..27c0aaa22cb0 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/rcupdate.h> #include <linux/list.h> +#include <linux/refcount.h> union inet_addr { __u32 all[4]; @@ -34,7 +35,7 @@ struct netpoll { }; struct netpoll_info { - atomic_t refcnt; + refcount_t refcnt; struct semaphore dev_lock; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index ba35ba520487..f5db93bcd069 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -27,11 +27,33 @@ struct phy_device *of_phy_attach(struct net_device *dev, phy_interface_t iface); extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); -extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); extern int of_phy_register_fixed_link(struct device_node *np); extern void of_phy_deregister_fixed_link(struct device_node *np); extern bool of_phy_is_fixed_link(struct device_node *np); + +static inline int of_mdio_parse_addr(struct device *dev, + const struct device_node *np) +{ + u32 addr; + int ret; + + ret = of_property_read_u32(np, "reg", &addr); + if (ret < 0) { + dev_err(dev, "%s has invalid PHY address\n", np->full_name); + return ret; + } + 
+ /* A PHY must have a reg property in the range [0-31] */ + if (addr >= PHY_MAX_ADDR) { + dev_err(dev, "%s PHY address %i is too large\n", + np->full_name, addr); + return -EINVAL; + } + + return addr; +} + #else /* CONFIG_OF_MDIO */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) { diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7d6aa29094b2..a3b873fc59e4 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -898,7 +898,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); -extern u64 perf_event_read_local(struct perf_event *event); +int perf_event_read_local(struct perf_event *event, u64 *value); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -1303,7 +1303,10 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event * { return ERR_PTR(-EINVAL); } -static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; } +static inline int perf_event_read_local(struct perf_event *event, u64 *value) +{ + return -EINVAL; +} static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } diff --git a/include/linux/phy.h b/include/linux/phy.h index e76e4adbc7c7..2a9567bb8186 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -58,13 +58,13 @@ #define PHY_IGNORE_INTERRUPT -2 #define PHY_HAS_INTERRUPT 0x00000001 -#define PHY_HAS_MAGICANEG 0x00000002 -#define PHY_IS_INTERNAL 0x00000004 +#define PHY_IS_INTERNAL 0x00000002 #define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ typedef enum { PHY_INTERFACE_MODE_NA, + PHY_INTERFACE_MODE_INTERNAL, PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_SGMII, @@ -84,6 +84,9 @@ typedef enum { PHY_INTERFACE_MODE_1000BASEX, PHY_INTERFACE_MODE_2500BASEX, PHY_INTERFACE_MODE_RXAUI, + PHY_INTERFACE_MODE_XAUI, + /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */ + PHY_INTERFACE_MODE_10GKR, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -112,6 +115,8 @@ static inline const char *phy_modes(phy_interface_t interface) switch (interface) { case PHY_INTERFACE_MODE_NA: return ""; + case PHY_INTERFACE_MODE_INTERNAL: + return "internal"; case PHY_INTERFACE_MODE_MII: return "mii"; case PHY_INTERFACE_MODE_GMII: @@ -150,6 +155,10 @@ static inline const char *phy_modes(phy_interface_t interface) return "2500base-x"; case PHY_INTERFACE_MODE_RXAUI: return "rxaui"; + case PHY_INTERFACE_MODE_XAUI: + return "xaui"; + case PHY_INTERFACE_MODE_10GKR: + return "10gbase-kr"; default: return "unknown"; } @@ -220,10 +229,8 @@ struct mii_bus { /* GPIO reset pulse width in microseconds */ int reset_delay_us; - /* Number of reset GPIOs */ - int num_reset_gpios; - /* Array of RESET GPIO descriptors */ - struct gpio_desc **reset_gpiod; + /* RESET GPIO descriptor pointer */ + struct gpio_desc *reset_gpiod; }; #define to_mii_bus(d) container_of(d, struct mii_bus, dev) @@ -364,6 +371,8 @@ struct phy_c45_device_ids { * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc. * has_fixups: Set to true if this phy has fixups/quirks. * suspended: Set to true if this phy has been suspended successfully. + * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. 
+ * loopback_enabled: Set true if this phy has been loopbacked successfully. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * link_timeout: The number of timer firings to wait before the @@ -400,6 +409,8 @@ struct phy_device { bool is_pseudo_fixed_link; bool has_fixups; bool suspended; + bool sysfs_links; + bool loopback_enabled; enum phy_state state; @@ -639,6 +650,7 @@ struct phy_driver { int (*set_tunable)(struct phy_device *dev, struct ethtool_tunable *tuna, const void *data); + int (*set_loopback)(struct phy_device *dev, bool enable); }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) @@ -717,14 +729,24 @@ static inline bool phy_is_internal(struct phy_device *phydev) } /** + * phy_interface_mode_is_rgmii - Convenience function for testing if a + * PHY interface mode is RGMII (all variants) + * @mode: the phy_interface_t enum + */ +static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) +{ + return mode >= PHY_INTERFACE_MODE_RGMII && + mode <= PHY_INTERFACE_MODE_RGMII_TXID; +}; + +/** * phy_interface_is_rgmii - Convenience function for testing if a PHY interface * is RGMII (all variants) * @phydev: the phy_device struct */ static inline bool phy_interface_is_rgmii(struct phy_device *phydev) { - return phydev->interface >= PHY_INTERFACE_MODE_RGMII && - phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; + return phy_interface_mode_is_rgmii(phydev->interface); }; /* @@ -774,6 +796,7 @@ void phy_device_remove(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); +int phy_loopback(struct phy_device *phydev, bool enable); struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phy_interface_t interface); struct phy_device *phy_find_first(struct mii_bus *bus); @@ -793,6 +816,7 @@ int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); +int phy_restart_aneg(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { @@ -816,6 +840,8 @@ static inline const char *phydev_name(const struct phy_device *phydev) void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
__printf(2, 3); void phy_attached_info(struct phy_device *phydev); + +/* Clause 22 PHY */ int genphy_config_init(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); @@ -825,11 +851,22 @@ int genphy_update_link(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); +int genphy_loopback(struct phy_device *phydev, bool enable); int genphy_soft_reset(struct phy_device *phydev); static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; } + +/* Clause 45 PHY */ +int genphy_c45_restart_aneg(struct phy_device *phydev); +int genphy_c45_aneg_done(struct phy_device *phydev); +int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask); +int genphy_c45_read_lpa(struct phy_device *phydev); +int genphy_c45_read_pma(struct phy_device *phydev); +int genphy_c45_pma_setup_forced(struct phy_device *phydev); +int genphy_c45_an_disable_aneg(struct phy_device *phydev); + void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); @@ -843,9 +880,8 @@ void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); void phy_trigger_machine(struct phy_device *phydev, bool sync); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); -int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); -int phy_ethtool_ksettings_get(struct phy_device *phydev, - struct ethtool_link_ksettings *cmd); +void phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd); int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h new file mode 100644 index 000000000000..84789ca634aa --- /dev/null +++ b/include/linux/platform_data/microchip-ksz.h @@ -0,0 +1,29 @@ +/* + * Microchip KSZ series switch platform data + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MICROCHIP_KSZ_H +#define __MICROCHIP_KSZ_H + +#include <linux/kernel.h> + +struct ksz_platform_data { + u32 chip_id; + u16 enabled_ports; +}; + +#endif diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h index a6f9d633f5be..9e75ac8d19be 100644 --- a/include/linux/platform_data/nfcmrvl.h +++ b/include/linux/platform_data/nfcmrvl.h @@ -23,7 +23,7 @@ struct nfcmrvl_platform_data { */ /* GPIO that is wired to RESET_N signal */ - unsigned int reset_n_io; + int reset_n_io; /* Tell if transport is muxed in HCI one */ unsigned int hci_muxed; diff --git a/include/linux/platform_data/st-nci.h b/include/linux/platform_data/st-nci.h deleted file mode 100644 index f6494b347c06..000000000000 --- a/include/linux/platform_data/st-nci.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Driver include for ST NCI NFC chip family. - * - * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef _ST_NCI_H_ -#define _ST_NCI_H_ - -#define ST_NCI_DRIVER_NAME "st_nci" - -struct st_nci_nfc_platform_data { - unsigned int gpio_reset; - unsigned int irq_polarity; - bool is_ese_present; - bool is_uicc_present; -}; - -#endif /* _ST_NCI_H_ */ diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6b2e0dd88569..d8c97ec8a8e6 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -278,6 +278,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i; + + for (i = 0; i < n; i++) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i] = ptr; + } + + return i; +} + /* * Note: resize (below) nests producer lock within consumer lock, so if you * call this in interrupt or BH context, you must disable interrupts/BH when @@ -328,6 +344,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) return ptr; } +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + 
spin_unlock_bh(&r->consumer_lock); + + return ret; +} + /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. @@ -403,6 +468,61 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) return 0; } +/* + * Return entries into ring. Destroy entries that don't fit. + * + * Note: this is expected to be a rare slow path operation. + * + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, + void (*destroy)(void *)) +{ + unsigned long flags; + int head; + + spin_lock_irqsave(&r->consumer_lock, flags); + spin_lock(&r->producer_lock); + + if (!r->size) + goto done; + + /* + * Clean out buffered entries (for simplicity). This way following code + * can test entries for NULL and if not assume they are valid. + */ + head = r->consumer_head - 1; + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + + /* + * Go over entries in batch, start moving head back and copy entries. + * Stop when we run into previously unconsumed entries. + */ + while (n) { + head = r->consumer_head - 1; + if (head < 0) + head = r->size - 1; + if (r->queue[head]) { + /* This batch entry will have to be destroyed. */ + goto done; + } + r->queue[head] = batch[--n]; + r->consumer_tail = r->consumer_head = head; + } + +done: + /* Destroy all entries left in the batch. */ + while (n) + destroy(batch[--n]); + spin_unlock(&r->producer_lock); + spin_unlock_irqrestore(&r->consumer_lock, flags); +} + static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, int size, gfp_t gfp, void (*destroy)(void *)) diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index fbab6e0514f0..39e2a2ac2471 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -38,6 +38,8 @@ #include <linux/slab.h> /* dma_addr_t manip */ +#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) +#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_REGPAIR_LE(x, val) do { \ @@ -96,12 +98,12 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 -#define MAX_NUM_LL2_RX_QUEUES 32 -#define MAX_NUM_LL2_TX_STATS_COUNTERS 32 +#define MAX_NUM_LL2_RX_QUEUES 48 +#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 15 -#define FW_REVISION_VERSION 3 +#define FW_MINOR_VERSION 20 +#define FW_REVISION_VERSION 0 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -181,6 +183,14 @@ #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) + +#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) +#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) +#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) +#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) +#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) +#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) + /*****************/ /* DQ CONSTANTS */ /*****************/ @@ -457,7 +467,6 @@ #define PXP_BAR_DQ 1 /* PTT and GTT */ -#define PXP_NUM_PF_WINDOWS 12 #define PXP_PER_PF_ENTRY_SIZE 8 #define PXP_NUM_GLOBAL_WINDOWS 243 #define 
PXP_GLOBAL_ENTRY_SIZE 4 @@ -482,6 +491,7 @@ #define PXP_PF_ME_OPAQUE_ADDR 0x1f8 #define PXP_PF_ME_CONCRETE_ADDR 0x1fc +#define PXP_NUM_PF_WINDOWS 12 #define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 #define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS #define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 @@ -618,16 +628,21 @@ /*****************/ /* PRM CONSTANTS */ /*****************/ -#define PRM_DMA_PAD_BYTES_NUM 2 -/******************/ -/* SDMs CONSTANTS */ -/******************/ -#define SDM_OP_GEN_TRIG_NONE 0 -#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 -#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 -#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7 +#define PRM_DMA_PAD_BYTES_NUM 2 +/*****************/ +/* SDMs CONSTANTS */ +/*****************/ + +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 + +/********************/ +/* Completion types */ +/********************/ #define SDM_COMP_TYPE_NONE 0 #define SDM_COMP_TYPE_WAKE_THREAD 1 @@ -638,10 +653,11 @@ #define SDM_COMP_TYPE_INDICATE_ERROR 6 #define SDM_COMP_TYPE_RELEASE_THREAD 7 #define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 -/******************/ -/* PBF CONSTANTS */ -/******************/ +/*****************/ +/* PBF Constants */ +/*****************/ /* Number of PBF command queue lines. Each line is 32B. */ #define PBF_MAX_CMD_LINES 3328 @@ -764,7 +780,7 @@ enum protocol_type { PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, - PROTOCOLID_RESERVED4, + PROTOCOLID_IWARP, PROTOCOLID_RESERVED5, PROTOCOLID_PREROCE, PROTOCOLID_COMMON, @@ -861,7 +877,7 @@ enum db_dest { /* Enum of doorbell DPM types */ enum db_dpm_type { DPM_LEGACY, - DPM_ROCE, + DPM_RDMA, DPM_L2_INLINE, DPM_L2_BD, MAX_DB_DPM_TYPE @@ -884,8 +900,8 @@ struct db_l2_dpm_data { #define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 #define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 #define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 -#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1 -#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31 +#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1 +#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31 }; /* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ @@ -931,31 +947,33 @@ struct db_pwm_addr { }; /* Parameters to RoCE firmware, passed in EDPM doorbell */ -struct db_roce_dpm_params { +struct db_rdma_dpm_params { __le32 params; -#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF -#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 -#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF 
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; /* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ -struct db_roce_dpm_data { +struct db_rdma_dpm_data { __le16 icid; __le16 prod_val; - struct db_roce_dpm_params params; + struct db_rdma_dpm_params params; }; /* Igu interrupt command */ @@ -1026,6 +1044,42 @@ struct parsing_and_err_flags { #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; +struct parsing_err_flags { + __le16 flags; +#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 +}; + struct pb_context { __le32 crc[4]; }; @@ -1288,39 +1342,56 @@ struct tdif_task_context { struct timers_context { __le32 logical_client_0; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC0_MASK 
0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 __le32 logical_client_1; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 30 __le32 logical_client_2; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED2_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED2_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED4_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED4_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED5_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED5_SHIFT 30 __le32 host_expiration_fields; -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 -#define TIMERS_CONTEXT_RESERVED3_MASK 0x7 -#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED6_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED6_SHIFT 27 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED7_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; + +enum tunnel_next_protocol { + e_unknown = 0, + e_l2 = 1, + e_ipv4 = 2, + e_ipv6 = 3, + MAX_TUNNEL_NEXT_PROTOCOL +}; + #endif /* __COMMON_HSI__ */ #endif diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 34d93eb5bfba..cb06e6e368e1 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -75,7 +75,8 @@ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ -#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 /* num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 947a635d04bb..12fc9e788eea 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -13,7 +13,6 @@ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 -#define 
FCOE_MAX_SIZE_FCP_DATA_SUPER (8600) struct fcoe_abts_pkt { __le32 abts_rsp_fc_payload_lo; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 69949f8e354b..85e086cba639 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -75,25 +75,13 @@ #define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ -#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0) -#define ISCSI_OPCODE_NOP_OUT ( \ - ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40) -#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1) -#define ISCSI_OPCODE_SCSI_CMD ( \ - ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40) -#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2) -#define ISCSI_OPCODE_TMF_REQUEST ( \ - ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3) -#define ISCSI_OPCODE_LOGIN_REQUEST ( \ - ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4) -#define ISCSI_OPCODE_TEXT_REQUEST ( \ - ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_DATA_OUT (5) -#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6) -#define ISCSI_OPCODE_LOGOUT_REQUEST ( \ - ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_NOP_OUT (0) +#define ISCSI_OPCODE_SCSI_CMD (1) +#define ISCSI_OPCODE_TMF_REQUEST (2) +#define ISCSI_OPCODE_LOGIN_REQUEST (3) +#define ISCSI_OPCODE_TEXT_REQUEST (4) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ #define ISCSI_OPCODE_NOP_IN (0x20) @@ -172,17 +160,23 @@ struct iscsi_async_msg_hdr { struct iscsi_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 opcode; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 __le32 hdr_second_dword; #define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 @@ -790,9 +784,9 @@ enum iscsi_error_types { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, ISCSI_CONN_ERROR_DATA_OVERRUN, ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION, + ISCSI_CONN_ERROR_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_PRS_ERRORS, + ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION, ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, @@ -1304,22 +1298,6 @@ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_total_pdu_cnt; }; -struct iscsi_db_data { - u8 params; -#define ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define 
ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 sq_prod; -}; - struct tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; @@ -1398,5 +1376,20 @@ struct tstorm_iscsi_task_ag_ctx { __le32 reg1; __le32 reg2; }; +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; #endif /* __ISCSI_COMMON__ */ diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h new file mode 100644 index 000000000000..b8b3e1cfae90 --- /dev/null +++ b/include/linux/qed/iwarp_common.h @@ -0,0 +1,53 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __IWARP_COMMON__ +#define __IWARP_COMMON__ +#include <linux/qed/rdma_common.h> +/************************/ +/* IWARP FW CONSTANTS */ +/************************/ + +#define IWARP_ACTIVE_MODE 0 +#define IWARP_PASSIVE_MODE 1 + +#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) + +#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) +#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) + +#define IWARP_MAX_QPS (64 * 1024) + +#endif /* __IWARP_COMMON__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 5cd7a4608c9b..59ddf9af909e 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -80,6 +80,11 @@ struct qed_chain_pbl_u32 { u32 cons_page_idx; }; +struct qed_chain_ext_pbl { + dma_addr_t p_pbl_phys; + void *p_pbl_virt; +}; + struct qed_chain_u16 { /* Cyclic index of next element to produce/consume */ u16 prod_idx; @@ -155,6 +160,8 @@ struct qed_chain { u32 size; u8 intended_use; + + bool b_external_pbl; }; #define QED_CHAIN_PBL_ENTRY_SIZE (8) diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d66d16a559e1..0eef0a2b1901 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -47,8 +47,7 @@ struct qed_queue_start_common_params { /* Relative, but relevant only for PFs */ u8 stats_id; - /* These are always absolute */ - u16 sb; + struct qed_sb_info *p_sb; u8 sb_idx; }; @@ -74,6 +73,9 @@ struct qed_dev_eth_info { /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; + + /* Might depend on available resources [in case of VF] */ + bool xdp_supported; }; struct qed_update_vport_rss_params { diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index bd6bcb809415..1e015c50e6b8 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -24,6 +24,11 @@ struct qed_dev_fcoe_info { void __iomem *primary_dbq_rq_addr; void __iomem *secondary_bdq_rq_addr; + + u64 wwpn; + u64 wwnn; + + u8 num_cqs; }; struct qed_fcoe_params_offload { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index c70ac13a97e6..ef39c7f40ae6 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -156,6 +156,11 @@ struct qed_dcbx_get { struct qed_dcbx_admin_params local; }; +enum qed_nvm_images { + QED_NVM_IMAGE_ISCSI_CFG, + QED_NVM_IMAGE_FCOE_CFG, +}; + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -180,6 +185,10 @@ struct qed_eth_pf_params { */ u16 num_cons; + /* per-VF number of CIDs */ + u8 num_vf_cons; +#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) + /* To enable arfs, prior to HW-init a positive number needs to be * set [as filters require allocated searcher ILT memory]. * This will set the maximal number of configured steering-filters.
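The new num_vf_cons field and its default land in the same pre-init parameter block that, per the qed_common_ops documentation in the next hunk, client drivers must hand to the core before slowpath_start. A minimal sketch of that call follows; the struct qed_pf_params wrapper, the update_pf_params callback, and the 64-queue budget are all assumptions taken from parts of qed_if.h outside this excerpt, not definitions shown in this diff:

#include <linux/string.h>
#include <linux/qed/qed_if.h>

/*
 * Sketch only: size the PF's L2 connection budget before bringing up
 * the slowpath. qed_pf_params and update_pf_params are assumed from
 * elsewhere in qed_if.h; the queue count is an arbitrary illustration.
 */
static void example_size_eth_pf_params(struct qed_dev *cdev,
				       const struct qed_common_ops *ops)
{
	struct qed_pf_params pf_params;

	memset(&pf_params, 0, sizeof(pf_params));

	/* One CID per RX, TX and XDP queue across 64 queue pairs. */
	pf_params.eth_pf_params.num_cons = 64 * 3;

	/* Per-VF CID budget; the default comes from this header. */
	pf_params.eth_pf_params.num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;

	/* Must run before ops->slowpath_start(), per the comment below. */
	ops->update_pf_params(cdev, &pf_params);
}

The ETH_PF_PARAMS_VF_CONS_DEFAULT define suggests the core substitutes the default when num_vf_cons is left at zero, but that fallback would live in the qed core, not in this header.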
@@ -328,6 +337,14 @@ struct qed_dev_info { /* MFW version */ u32 mfw_rev; +#define QED_MFW_VERSION_0_MASK 0x000000FF +#define QED_MFW_VERSION_0_OFFSET 0 +#define QED_MFW_VERSION_1_MASK 0x0000FF00 +#define QED_MFW_VERSION_1_OFFSET 8 +#define QED_MFW_VERSION_2_MASK 0x00FF0000 +#define QED_MFW_VERSION_2_OFFSET 16 +#define QED_MFW_VERSION_3_MASK 0xFF000000 +#define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; u8 mf_mode; @@ -337,12 +354,23 @@ struct qed_dev_info { bool wol_support; + /* MBI version */ + u32 mbi_version; +#define QED_MBI_VERSION_0_MASK 0x000000FF +#define QED_MBI_VERSION_0_OFFSET 0 +#define QED_MBI_VERSION_1_MASK 0x0000FF00 +#define QED_MBI_VERSION_1_OFFSET 8 +#define QED_MBI_VERSION_2_MASK 0x00FF0000 +#define QED_MBI_VERSION_2_OFFSET 16 + enum qed_dev_type dev_type; /* Output parameters for qede */ bool vxlan_enable; bool gre_enable; bool geneve_enable; + + u8 abs_pf_id; }; enum qed_sb_type { @@ -503,9 +531,7 @@ struct qed_common_ops { int (*set_power_state)(struct qed_dev *cdev, pci_power_t state); - void (*set_id)(struct qed_dev *cdev, - char name[], - char ver_str[]); + void (*set_name) (struct qed_dev *cdev, char name[]); /* Client drivers need to make this call before slowpath_start. * PF params required for the call before slowpath_start is @@ -608,12 +634,26 @@ struct qed_common_ops { enum qed_chain_cnt_type cnt_type, u32 num_elems, size_t elem_size, - struct qed_chain *p_chain); + struct qed_chain *p_chain, + struct qed_chain_ext_pbl *ext_pbl); void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); /** + * @brief nvm_get_image - reads an entire image from nvram + * + * @param cdev + * @param type - type of the requested nvram image + * @param buf - preallocated buffer to fill with the image + * @param len - length of the allocated buffer + * + * @return 0 on success, error otherwise + */ + int (*nvm_get_image)(struct qed_dev *cdev, + enum qed_nvm_images type, u8 *buf, u16 len); + +/** * @brief get_coalesce - Get coalesce parameters in usec * * @param cdev @@ -700,11 +740,13 @@ struct qed_common_ops { (((value) >> (name ## _SHIFT)) & name ## _MASK) /* Debug print definitions */ -#define DP_ERR(cdev, fmt, ...) \ - pr_err("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - DP_NAME(cdev) ? DP_NAME(cdev) : "", \ - ## __VA_ARGS__) \ +#define DP_ERR(cdev, fmt, ...) \ + do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } while (0) #define DP_NOTICE(cdev, fmt, ...) \ do { \ @@ -869,9 +911,15 @@ struct qed_eth_stats { #define TX_PI(tc) (RX_PI + 1 + tc) struct qed_sb_cnt_info { - int sb_cnt; - int sb_iov_cnt; - int sb_free_blk; + /* Original, current, and free SBs for PF */ + int orig; + int cnt; + int free_cnt; + + /* Original, current and free SBs for child VFs */ + int iov_orig; + int iov_cnt; + int free_cnt_iov; }; static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 3414649133d2..111e606a74c8 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -210,6 +210,11 @@ struct qed_iscsi_cb_ops { * @param stats - pointer to struct that would be filled * with stats * @return 0 on success, error otherwise. + * @change_mac Change MAC of interface + * @param cdev + * @param handle - the connection handle. + * @param mac - new MAC to configure. + * @return 0 on success, otherwise error value.
*/ struct qed_iscsi_ops { const struct qed_common_ops *common; @@ -248,6 +253,8 @@ struct qed_iscsi_ops { int (*get_stats)(struct qed_dev *cdev, struct qed_iscsi_stats *stats); + + int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac); }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void); diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 4fb4666ea879..dd7a3b86bb9e 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -43,6 +43,36 @@ #include <linux/slab.h> #include <linux/qed/qed_if.h> +enum qed_ll2_conn_type { + QED_LL2_TYPE_FCOE, + QED_LL2_TYPE_ISCSI, + QED_LL2_TYPE_TEST, + QED_LL2_TYPE_OOO, + QED_LL2_TYPE_RESERVED2, + QED_LL2_TYPE_ROCE, + QED_LL2_TYPE_IWARP, + QED_LL2_TYPE_RESERVED3, + MAX_QED_LL2_RX_CONN_TYPE +}; + +enum qed_ll2_roce_flavor_type { + QED_LL2_ROCE, + QED_LL2_RROCE, + MAX_QED_LL2_ROCE_FLAVOR_TYPE +}; + +enum qed_ll2_tx_dest { + QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ + QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ + QED_LL2_TX_DEST_MAX +}; + +enum qed_ll2_error_handle { + QED_LL2_DROP_PACKET, + QED_LL2_DO_NOTHING, + QED_LL2_ASSERT, +}; + struct qed_ll2_stats { u64 gsi_invalid_hdr; u64 gsi_invalid_pkt_length; @@ -67,6 +97,105 @@ struct qed_ll2_stats { u64 sent_bcast_pkts; }; +struct qed_ll2_comp_rx_data { + void *cookie; + dma_addr_t rx_buf_addr; + u16 parse_flags; + u16 vlan; + bool b_last_packet; + u8 connection_handle; + + union { + u16 packet_length; + u16 data_length; + } length; + + u32 opaque_data_0; + u32 opaque_data_1; + + /* GSI only */ + u32 gid_dst[4]; + u16 qp_id; + + union { + u8 placement_offset; + u8 data_length_error; + } u; +}; + +typedef +void (*qed_ll2_complete_rx_packet_cb)(void *cxt, + struct qed_ll2_comp_rx_data *data); + +typedef +void (*qed_ll2_release_rx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + bool b_last_packet); + +typedef +void (*qed_ll2_complete_tx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, + bool b_last_packet); + +typedef +void (*qed_ll2_release_tx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet); + +struct qed_ll2_cbs { + qed_ll2_complete_rx_packet_cb rx_comp_cb; + qed_ll2_release_rx_packet_cb rx_release_cb; + qed_ll2_complete_tx_packet_cb tx_comp_cb; + qed_ll2_release_tx_packet_cb tx_release_cb; + void *cookie; +}; + +struct qed_ll2_acquire_data_inputs { + enum qed_ll2_conn_type conn_type; + u16 mtu; + u16 rx_num_desc; + u16 rx_num_ooo_buffers; + u8 rx_drop_ttl0_flg; + u8 rx_vlan_removal_en; + u16 tx_num_desc; + u8 tx_max_bds_per_packet; + u8 tx_tc; + enum qed_ll2_tx_dest tx_dest; + enum qed_ll2_error_handle ai_err_packet_too_big; + enum qed_ll2_error_handle ai_err_no_buf; + u8 gsi_enable; +}; + +struct qed_ll2_acquire_data { + struct qed_ll2_acquire_data_inputs input; + const struct qed_ll2_cbs *cbs; + + /* Output container for LL2 connection's handle */ + u8 *p_connection_handle; +}; + +struct qed_ll2_tx_pkt_info { + void *cookie; + dma_addr_t first_frag; + enum qed_ll2_tx_dest tx_dest; + enum qed_ll2_roce_flavor_type qed_roce_flavor; + u16 vlan; + u16 l4_hdr_offset_w; /* from start of packet */ + u16 first_frag_len; + u8 num_of_bds; + u8 bd_flags; + bool enable_ip_cksum; + bool enable_l4_cksum; + bool calc_ip_len; +}; + #define QED_LL2_UNUSED_HANDLE (0xff) struct qed_ll2_cb_ops { diff --git 
a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_rdma_if.h index cbb2ff0ce4bc..4dd72ba210f5 100644 --- a/include/linux/qed/qed_roce_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -29,13 +29,11 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef _QED_ROCE_IF_H -#define _QED_ROCE_IF_H +#ifndef _QED_RDMA_IF_H +#define _QED_RDMA_IF_H #include <linux/types.h> #include <linux/delay.h> #include <linux/list.h> -#include <linux/mutex.h> -#include <linux/pci.h> #include <linux/slab.h> #include <linux/qed/qed_if.h> #include <linux/qed/qed_ll2_if.h> @@ -472,6 +470,101 @@ struct qed_rdma_counters_out_params { #define QED_ROCE_TX_HEAD_FAILURE (1) #define QED_ROCE_TX_FRAG_FAILURE (2) +enum qed_iwarp_event_type { + QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */ + QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */ + QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */ + QED_IWARP_EVENT_DISCONNECT, + QED_IWARP_EVENT_CLOSE, + QED_IWARP_EVENT_IRQ_FULL, + QED_IWARP_EVENT_RQ_EMPTY, + QED_IWARP_EVENT_LLP_TIMEOUT, + QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR, + QED_IWARP_EVENT_CQ_OVERFLOW, + QED_IWARP_EVENT_QP_CATASTROPHIC, + QED_IWARP_EVENT_ACTIVE_MPA_REPLY, + QED_IWARP_EVENT_LOCAL_ACCESS_ERROR, + QED_IWARP_EVENT_REMOTE_OPERATION_ERROR, + QED_IWARP_EVENT_TERMINATE_RECEIVED +}; + +enum qed_tcp_ip_version { + QED_TCP_IPV4, + QED_TCP_IPV6, +}; + +struct qed_iwarp_cm_info { + enum qed_tcp_ip_version ip_version; + u32 remote_ip[4]; + u32 local_ip[4]; + u16 remote_port; + u16 local_port; + u16 vlan; + u8 ord; + u8 ird; + u16 private_data_len; + const void *private_data; +}; + +struct qed_iwarp_cm_event_params { + enum qed_iwarp_event_type event; + const struct qed_iwarp_cm_info *cm_info; + void *ep_context; /* To be passed to accept call */ + int status; +}; + +typedef int (*iwarp_event_handler) (void *context, + struct qed_iwarp_cm_event_params *event); + +struct qed_iwarp_connect_in { + iwarp_event_handler event_cb; + void *cb_context; + struct qed_rdma_qp *qp; + struct qed_iwarp_cm_info cm_info; + u16 mss; + u8 remote_mac_addr[ETH_ALEN]; + u8 local_mac_addr[ETH_ALEN]; +}; + +struct qed_iwarp_connect_out { + void *ep_context; +}; + +struct qed_iwarp_listen_in { + iwarp_event_handler event_cb; + void *cb_context; /* passed to event_cb */ + u32 max_backlog; + enum qed_tcp_ip_version ip_version; + u32 ip_addr[4]; + u16 port; + u16 vlan; +}; + +struct qed_iwarp_listen_out { + void *handle; +}; + +struct qed_iwarp_accept_in { + void *ep_context; + void *cb_context; + struct qed_rdma_qp *qp; + const void *private_data; + u16 private_data_len; + u8 ord; + u8 ird; +}; + +struct qed_iwarp_reject_in { + void *ep_context; + void *cb_context; + const void *private_data; + u16 private_data_len; +}; + +struct qed_iwarp_send_rtr_in { + void *ep_context; +}; + struct qed_roce_ll2_header { void *vaddr; dma_addr_t baddr; @@ -491,44 +584,9 @@ struct qed_roce_ll2_packet { enum qed_roce_ll2_tx_dest tx_dest; }; -struct qed_roce_ll2_tx_params { - int reserved; -}; - -struct qed_roce_ll2_rx_params { - u16 vlan_id; - u8 smac[ETH_ALEN]; - int rc; -}; - -struct qed_roce_ll2_cbs { - void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt); - - void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt, - struct qed_roce_ll2_rx_params *params); -}; - -struct qed_roce_ll2_params { - u16 max_rx_buffers; - u16 max_tx_buffers; - u16 mtu; - u8 mac_address[ETH_ALEN]; - struct qed_roce_ll2_cbs cbs; - void *cb_cookie; -}; - -struct qed_roce_ll2_info { - u8 handle; - 
struct qed_roce_ll2_cbs cbs; - u8 mac_address[ETH_ALEN]; - void *cb_cookie; - - /* Lock to protect ll2 */ - struct mutex lock; -}; - enum qed_rdma_type { QED_RDMA_TYPE_ROCE, + QED_RDMA_TYPE_IWARP }; struct qed_dev_rdma_info { @@ -579,26 +637,58 @@ struct qed_rdma_ops { int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *oparams); int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); + int (*rdma_register_tid)(void *rdma_cxt, struct qed_rdma_register_tid_in_params *iparams); + int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); void (*rdma_free_tid)(void *rdma_cxt, u32 itid); - int (*roce_ll2_start)(struct qed_dev *cdev, - struct qed_roce_ll2_params *params); - int (*roce_ll2_stop)(struct qed_dev *cdev); - int (*roce_ll2_tx)(struct qed_dev *cdev, - struct qed_roce_ll2_packet *packet, - struct qed_roce_ll2_tx_params *params); - int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev, - struct qed_roce_ll2_buffer *buf, - u64 cookie, u8 notify_fw); - int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev, - u8 *old_mac_address, - u8 *new_mac_address); - int (*roce_ll2_stats)(struct qed_dev *cdev, - struct qed_ll2_stats *stats); + + int (*ll2_acquire_connection)(void *rdma_cxt, + struct qed_ll2_acquire_data *data); + + int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle); + int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle); + void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle); + + int (*ll2_prepare_tx_packet)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw); + + int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt, + u8 connection_handle, + dma_addr_t addr, + u16 nbytes); + int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle, + dma_addr_t addr, u16 buf_len, void *cookie, + u8 notify_fw); + int (*ll2_get_stats)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_stats *p_stats); + int (*ll2_set_mac_filter)(struct qed_dev *cdev, + u8 *old_mac_address, u8 *new_mac_address); + + int (*iwarp_connect)(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams); + + int (*iwarp_create_listen)(void *rdma_cxt, + struct qed_iwarp_listen_in *iparams, + struct qed_iwarp_listen_out *oparams); + + int (*iwarp_accept)(void *rdma_cxt, + struct qed_iwarp_accept_in *iparams); + + int (*iwarp_reject)(void *rdma_cxt, + struct qed_iwarp_reject_in *iparams); + + int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle); + + int (*iwarp_send_rtr)(void *rdma_cxt, + struct qed_iwarp_send_rtr_in *iparams); }; const struct qed_rdma_ops *qed_get_rdma_ops(void); diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_rdma.h index 3b8dd551a98c..9904617a9730 100644 --- a/include/linux/qed/qede_roce.h +++ b/include/linux/qed/qede_rdma.h @@ -32,22 +32,27 @@ #ifndef QEDE_ROCE_H #define QEDE_ROCE_H +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/workqueue.h> + struct qedr_dev; struct qed_dev; struct qede_dev; -enum qede_roce_event { +enum qede_rdma_event { QEDE_UP, QEDE_DOWN, QEDE_CHANGE_ADDR, QEDE_CLOSE }; -struct qede_roce_event_work { +struct qede_rdma_event_work { struct list_head list; struct work_struct work; void *ptr; - enum qede_roce_event event; + enum qede_rdma_event event; }; struct qedr_driver { @@ -57,32 +62,33 @@ struct qedr_driver { struct net_device *); void (*remove)(struct qedr_dev *); - void 
(*notify)(struct qedr_dev *, enum qede_roce_event); + void (*notify)(struct qedr_dev *, enum qede_rdma_event); }; -/* APIs for RoCE driver to register callback handlers, +/* APIs for RDMA driver to register callback handlers, * which will be invoked when device is added, removed, ifup, ifdown */ -int qede_roce_register_driver(struct qedr_driver *drv); -void qede_roce_unregister_driver(struct qedr_driver *drv); +int qede_rdma_register_driver(struct qedr_driver *drv); +void qede_rdma_unregister_driver(struct qedr_driver *drv); -bool qede_roce_supported(struct qede_dev *dev); +bool qede_rdma_supported(struct qede_dev *dev); #if IS_ENABLED(CONFIG_QED_RDMA) -int qede_roce_dev_add(struct qede_dev *dev); -void qede_roce_dev_event_open(struct qede_dev *dev); -void qede_roce_dev_event_close(struct qede_dev *dev); -void qede_roce_dev_remove(struct qede_dev *dev); -void qede_roce_event_changeaddr(struct qede_dev *qedr); +int qede_rdma_dev_add(struct qede_dev *dev); +void qede_rdma_dev_event_open(struct qede_dev *dev); +void qede_rdma_dev_event_close(struct qede_dev *dev); +void qede_rdma_dev_remove(struct qede_dev *dev); +void qede_rdma_event_changeaddr(struct qede_dev *edr); + #else -static inline int qede_roce_dev_add(struct qede_dev *dev) +static inline int qede_rdma_dev_add(struct qede_dev *dev) { return 0; } -static inline void qede_roce_dev_event_open(struct qede_dev *dev) {} -static inline void qede_roce_dev_event_close(struct qede_dev *dev) {} -static inline void qede_roce_dev_remove(struct qede_dev *dev) {} -static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {} +static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} +static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} +static inline void qede_rdma_dev_remove(struct qede_dev *dev) {} +static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} #endif #endif diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 72c770f9f666..a9b3050f469c 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -42,7 +42,7 @@ #define RDMA_MAX_SGE_PER_SQ_WQE (4) #define RDMA_MAX_SGE_PER_RQ_WQE (4) -#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF) +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) #define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) #define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 866f063026de..fe6a33e45977 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -37,6 +37,8 @@ #define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) #define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index a5e843268f0e..dbf7a43c3e1f 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -111,7 +111,6 @@ struct tcp_offload_params { __le32 snd_wnd; __le32 rcv_wnd; __le32 snd_wl1; - __le32 ts_time; __le32 ts_recent; __le32 ts_recent_age; __le32 total_rt; @@ -122,7 +121,7 @@ struct tcp_offload_params { u8 ka_probe_cnt; u8 rt_cnt; __le16 rtt_var; - __le16 reserved2; + __le16 fw_internal; __le32 ka_timeout; __le32 ka_interval; __le32 max_rt_time; @@ -130,7 +129,7 @@ struct tcp_offload_params { u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 ts_ticks_per_second; + __le32 reserved3[2]; }; struct tcp_offload_params_opt2 { diff --git 
a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 57e54847b0b9..dea59c8eec54 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -18,7 +18,8 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, - unsigned change, gfp_t flags); + unsigned change, u32 event, + gfp_t flags); void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags); diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index c68307bc306f..7343f71783dc 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -37,6 +37,8 @@ struct sockaddr_rxrpc { #define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ #define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ #define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ +#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */ +#define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */ /* * RxRPC control messages @@ -44,15 +46,20 @@ struct sockaddr_rxrpc { * - terminal messages mean that a user call ID tag can be recycled * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg() */ -#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */ -#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */ -#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */ -#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */ -#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */ -#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */ -#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */ -#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */ -#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */ +enum rxrpc_cmsg_type { + RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */ + RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */ + RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */ + RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */ + RXRPC_BUSY = 6, /* -r: server busy received [terminal] */ + RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */ + RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */ + RXRPC_ACCEPT = 9, /* s-: [Service] accept request */ + RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ + RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ + RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ + RXRPC__SUPPORTED +}; /* * RxRPC security levels diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 7a4804c4a593..99e866487e2f 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -57,12 +57,12 @@ #include <uapi/linux/sctp.h> /* Section 3.1. SCTP Common Header Format */ -typedef struct sctphdr { +struct sctphdr { __be16 source; __be16 dest; __be32 vtag; __le32 checksum; -} sctp_sctphdr_t; +}; static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) { @@ -70,11 +70,11 @@ static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) } /* Section 3.2. Chunk Field Descriptions. */ -typedef struct sctp_chunkhdr { +struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; -} sctp_chunkhdr_t; +}; /* Section 3.2. Chunk Type Values. 
@@ -82,7 +82,7 @@ typedef struct sctp_chunkhdr { * Value field. It takes a value from 0 to 254. The value of 255 is * reserved for future use as an extension field. */ -typedef enum { +enum sctp_cid { SCTP_CID_DATA = 0, SCTP_CID_INIT = 1, SCTP_CID_INIT_ACK = 2, @@ -109,7 +109,7 @@ typedef enum { SCTP_CID_ASCONF = 0xC1, SCTP_CID_ASCONF_ACK = 0x80, SCTP_CID_RECONF = 0x82, -} sctp_cid_t; /* enum */ +}; /* enum */ /* Section 3.2 @@ -117,12 +117,12 @@ typedef enum { * the action that must be taken if the processing endpoint does not * recognize the Chunk Type. */ -typedef enum { +enum { SCTP_CID_ACTION_DISCARD = 0x00, SCTP_CID_ACTION_DISCARD_ERR = 0x40, SCTP_CID_ACTION_SKIP = 0x80, SCTP_CID_ACTION_SKIP_ERR = 0xc0, -} sctp_cid_action_t; +}; enum { SCTP_CID_ACTION_MASK = 0xc0, }; @@ -162,12 +162,12 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 }; * Section 3.2.1 Optional/Variable-length Parmaeter Format. */ -typedef struct sctp_paramhdr { +struct sctp_paramhdr { __be16 type; __be16 length; -} sctp_paramhdr_t; +}; -typedef enum { +enum sctp_param { /* RFC 2960 Section 3.3.5 */ SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1), @@ -207,7 +207,7 @@ typedef enum { SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010), SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011), SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012), -} sctp_param_t; /* enum */ +}; /* enum */ /* RFC 2960 Section 3.2.1 @@ -216,29 +216,29 @@ typedef enum { * not recognize the Parameter Type. * */ -typedef enum { +enum { SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000), SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000), SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000), SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000), -} sctp_param_action_t; +}; enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), }; /* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */ -typedef struct sctp_datahdr { +struct sctp_datahdr { __be32 tsn; __be16 stream; __be16 ssn; __be32 ppid; __u8 payload[0]; -} sctp_datahdr_t; +}; -typedef struct sctp_data_chunk { - sctp_chunkhdr_t chunk_hdr; - sctp_datahdr_t data_hdr; -} sctp_data_chunk_t; +struct sctp_data_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_datahdr data_hdr; +}; /* DATA Chuck Specific Flags */ enum { @@ -257,54 +257,54 @@ enum { SCTP_DATA_FRAG_MASK = 0x03, }; * This chunk is used to initiate a SCTP association between two * endpoints. */ -typedef struct sctp_inithdr { +struct sctp_inithdr { __be32 init_tag; __be32 a_rwnd; __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; __u8 params[0]; -} sctp_inithdr_t; +}; -typedef struct sctp_init_chunk { - sctp_chunkhdr_t chunk_hdr; - sctp_inithdr_t init_hdr; -} sctp_init_chunk_t; +struct sctp_init_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_inithdr init_hdr; +}; /* Section 3.3.2.1. IPv4 Address Parameter (5) */ typedef struct sctp_ipv4addr_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; struct in_addr addr; } sctp_ipv4addr_param_t; /* Section 3.3.2.1. 
IPv6 Address Parameter (6) */ typedef struct sctp_ipv6addr_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; struct in6_addr addr; } sctp_ipv6addr_param_t; /* Section 3.3.2.1 Cookie Preservative (9) */ typedef struct sctp_cookie_preserve_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __be32 lifespan_increment; } sctp_cookie_preserve_param_t; /* Section 3.3.2.1 Host Name Address (11) */ typedef struct sctp_hostname_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; uint8_t hostname[0]; } sctp_hostname_param_t; /* Section 3.3.2.1 Supported Address Types (12) */ typedef struct sctp_supported_addrs_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __be16 types[0]; } sctp_supported_addrs_param_t; /* Appendix A. ECN Capable (32768) */ typedef struct sctp_ecn_capable_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; } sctp_ecn_capable_param_t; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ @@ -321,19 +321,19 @@ typedef struct sctp_supported_ext_param { /* AUTH Section 3.1 Random */ typedef struct sctp_random_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u8 random_val[0]; } sctp_random_param_t; /* AUTH Section 3.2 Chunk List */ typedef struct sctp_chunks_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u8 chunks[0]; } sctp_chunks_param_t; /* AUTH Section 3.3 HMAC Algorithm */ typedef struct sctp_hmac_algo_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __be16 hmac_ids[0]; } sctp_hmac_algo_param_t; @@ -341,18 +341,18 @@ typedef struct sctp_hmac_algo_param { * The INIT ACK chunk is used to acknowledge the initiation of an SCTP * association. */ -typedef sctp_init_chunk_t sctp_initack_chunk_t; +typedef struct sctp_init_chunk sctp_initack_chunk_t; /* Section 3.3.3.1 State Cookie (7) */ typedef struct sctp_cookie_param { - sctp_paramhdr_t p; + struct sctp_paramhdr p; __u8 body[0]; } sctp_cookie_param_t; /* Section 3.3.3.1 Unrecognized Parameters (8) */ typedef struct sctp_unrecognized_param { - sctp_paramhdr_t param_hdr; - sctp_paramhdr_t unrecognized; + struct sctp_paramhdr param_hdr; + struct sctp_paramhdr unrecognized; } sctp_unrecognized_param_t; @@ -386,7 +386,7 @@ typedef struct sctp_sackhdr { } sctp_sackhdr_t; typedef struct sctp_sack_chunk { - sctp_chunkhdr_t chunk_hdr; + struct sctp_chunkhdr chunk_hdr; sctp_sackhdr_t sack_hdr; } sctp_sack_chunk_t; @@ -399,11 +399,11 @@ typedef struct sctp_sack_chunk { */ typedef struct sctp_heartbeathdr { - sctp_paramhdr_t info; + struct sctp_paramhdr info; } sctp_heartbeathdr_t; typedef struct sctp_heartbeat_chunk { - sctp_chunkhdr_t chunk_hdr; + struct sctp_chunkhdr chunk_hdr; sctp_heartbeathdr_t hb_hdr; } sctp_heartbeat_chunk_t; @@ -413,7 +413,7 @@ typedef struct sctp_heartbeat_chunk { * chunk descriptor. */ typedef struct sctp_abort_chunk { - sctp_chunkhdr_t uh; + struct sctp_chunkhdr uh; } sctp_abort_chunk_t; @@ -425,8 +425,8 @@ typedef struct sctp_shutdownhdr { } sctp_shutdownhdr_t; struct sctp_shutdown_chunk_t { - sctp_chunkhdr_t chunk_hdr; - sctp_shutdownhdr_t shutdown_hdr; + struct sctp_chunkhdr chunk_hdr; + sctp_shutdownhdr_t shutdown_hdr; }; /* RFC 2960. 
Section 3.3.10 Operation Error (ERROR) (9) */ @@ -438,8 +438,8 @@ typedef struct sctp_errhdr { } sctp_errhdr_t; typedef struct sctp_operr_chunk { - sctp_chunkhdr_t chunk_hdr; - sctp_errhdr_t err_hdr; + struct sctp_chunkhdr chunk_hdr; + sctp_errhdr_t err_hdr; } sctp_operr_chunk_t; /* RFC 2960 3.3.10 - Operation Error @@ -528,7 +528,7 @@ typedef struct sctp_ecnehdr { } sctp_ecnehdr_t; typedef struct sctp_ecne_chunk { - sctp_chunkhdr_t chunk_hdr; + struct sctp_chunkhdr chunk_hdr; sctp_ecnehdr_t ence_hdr; } sctp_ecne_chunk_t; @@ -540,7 +540,7 @@ typedef struct sctp_cwrhdr { } sctp_cwrhdr_t; typedef struct sctp_cwr_chunk { - sctp_chunkhdr_t chunk_hdr; + struct sctp_chunkhdr chunk_hdr; sctp_cwrhdr_t cwr_hdr; } sctp_cwr_chunk_t; @@ -639,7 +639,7 @@ struct sctp_fwdtsn_chunk { * report status of ASCONF processing. */ typedef struct sctp_addip_param { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __be32 crr_id; } sctp_addip_param_t; @@ -649,7 +649,7 @@ typedef struct sctp_addiphdr { } sctp_addiphdr_t; typedef struct sctp_addip_chunk { - sctp_chunkhdr_t chunk_hdr; + struct sctp_chunkhdr chunk_hdr; sctp_addiphdr_t addip_hdr; } sctp_addip_chunk_t; @@ -709,7 +709,7 @@ typedef struct sctp_authhdr { } sctp_authhdr_t; typedef struct sctp_auth_chunk { - sctp_chunkhdr_t chunk_hdr; + struct sctp_chunkhdr chunk_hdr; sctp_authhdr_t auth_hdr; } sctp_auth_chunk_t; @@ -719,12 +719,12 @@ struct sctp_infox { }; struct sctp_reconf_chunk { - sctp_chunkhdr_t chunk_hdr; + struct sctp_chunkhdr chunk_hdr; __u8 params[0]; }; struct sctp_strreset_outreq { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u32 request_seq; __u32 response_seq; __u32 send_reset_at_tsn; @@ -732,18 +732,18 @@ struct sctp_strreset_outreq { }; struct sctp_strreset_inreq { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u32 request_seq; __u16 list_of_streams[0]; }; struct sctp_strreset_tsnreq { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u32 request_seq; }; struct sctp_strreset_addstrm { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u32 request_seq; __u16 number_of_streams; __u16 reserved; @@ -760,13 +760,13 @@ enum { }; struct sctp_strreset_resp { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u32 response_seq; __u32 result; }; struct sctp_strreset_resptsn { - sctp_paramhdr_t param_hdr; + struct sctp_paramhdr param_hdr; __u32 response_seq; __u32 result; __u32 senders_next_tsn; diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index f4dfade428f0..35226cd4efb0 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -97,21 +97,46 @@ static inline struct sk_buff *skb_array_consume(struct skb_array *a) return ptr_ring_consume(&a->ring); } +static inline int skb_array_consume_batched(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched(&a->ring, (void **)array, n); +} + static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a) { return ptr_ring_consume_irq(&a->ring); } +static inline int skb_array_consume_batched_irq(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n); +} + static inline struct sk_buff *skb_array_consume_any(struct skb_array *a) { return ptr_ring_consume_any(&a->ring); } +static inline int skb_array_consume_batched_any(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_any(&a->ring, (void **)array, n); +} + + static inline struct 
sk_buff *skb_array_consume_bh(struct skb_array *a) { return ptr_ring_consume_bh(&a->ring); } +static inline int skb_array_consume_batched_bh(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n); +} + static inline int __skb_array_len_with_tag(struct sk_buff *skb) { if (likely(skb)) { @@ -156,6 +181,12 @@ static void __skb_array_destroy_skb(void *ptr) kfree_skb(ptr); } +static inline void skb_array_unconsume(struct skb_array *a, + struct sk_buff **skbs, int n) +{ + ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb); +} + static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) { return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 25b1659c832a..dbe29b6c9bd6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -109,6 +109,7 @@ * may perform further validation in this case. * GRE: only if the checksum is present in the header. * SCTP: indicates the CRC in SCTP header has been validated. + * FCOE: indicates the CRC in FC frame has been validated. * * skb->csum_level indicates the number of consecutive checksums found in * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. @@ -126,8 +127,10 @@ * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the * hardware doesn't need to parse L3/L4 headers to implement this. * - * Note: Even if device supports only some protocols, but is able to produce - * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. + * Notes: + * - Even if device supports only some protocols, but is able to produce + * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. + * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols. * * CHECKSUM_PARTIAL: * @@ -162,14 +165,11 @@ * * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate - * checksum offload capability. If a device has limited checksum capabilities - * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as - * described above) a helper function can be called to resolve - * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper - * function takes a spec argument that describes the protocol layer that is - * supported for checksum offload and can be called for each packet. If a - * packet does not match the specification for offload, skb_checksum_help - * is called to resolve the checksum. + * checksum offload capability. + * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based + * on network device checksumming capabilities: if a packet does not match + * them, skb_checksum_help or skb_crc32c_help (depending on the value of + * csum_not_inet, see item D.) is called to resolve the checksum. * * CHECKSUM_NONE: * @@ -189,11 +189,13 @@ * * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of * offloading the SCTP CRC in a packet. To perform this offload the stack - * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset - * accordingly. Note the there is no indication in the skbuff that the - * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports - * both IP checksum offload and SCTP CRC offload must verify which offload - * is configured for a packet presumably by inspecting packet headers. 
+ * will set set csum_start and csum_offset accordingly, set ip_summed to + * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in + * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. + * A driver that supports both IP checksum offload and SCTP CRC32c offload + * must verify which offload is configured for a packet by testing the + * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve + * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1. * * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of * offloading the FCOE CRC in a packet. To perform this offload the stack @@ -250,7 +252,7 @@ struct nf_conntrack { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { - atomic_t use; + refcount_t use; enum { BRNF_PROTO_UNCHANGED, BRNF_PROTO_8021Q, @@ -506,66 +508,6 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif -/** - * struct skb_mstamp - multi resolution time stamps - * @stamp_us: timestamp in us resolution - * @stamp_jiffies: timestamp in jiffies - */ -struct skb_mstamp { - union { - u64 v64; - struct { - u32 stamp_us; - u32 stamp_jiffies; - }; - }; -}; - -/** - * skb_mstamp_get - get current timestamp - * @cl: place to store timestamps - */ -static inline void skb_mstamp_get(struct skb_mstamp *cl) -{ - u64 val = local_clock(); - - do_div(val, NSEC_PER_USEC); - cl->stamp_us = (u32)val; - cl->stamp_jiffies = (u32)jiffies; -} - -/** - * skb_mstamp_delta - compute the difference in usec between two skb_mstamp - * @t1: pointer to newest sample - * @t0: pointer to oldest sample - */ -static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, - const struct skb_mstamp *t0) -{ - s32 delta_us = t1->stamp_us - t0->stamp_us; - u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies; - - /* If delta_us is negative, this might be because interval is too big, - * or local_clock() drift is too big : fallback using jiffies. 
- */ - if (delta_us <= 0 || - delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ))) - - delta_us = jiffies_to_usecs(delta_jiffies); - - return delta_us; -} - -static inline bool skb_mstamp_after(const struct skb_mstamp *t1, - const struct skb_mstamp *t0) -{ - s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; - - if (!diff) - diff = t1->stamp_us - t0->stamp_us; - return diff > 0; -} - /** * struct sk_buff - socket buffer * @next: Next buffer in list @@ -616,6 +558,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS + * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @dst_pending_confirm: need to confirm neighbour * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking @@ -646,7 +589,7 @@ struct sk_buff { union { ktime_t tstamp; - struct skb_mstamp skb_mstamp; + u64 skb_mstamp; }; }; struct rb_node rbnode; /* used in netem & tcp stack */ @@ -744,7 +687,7 @@ struct sk_buff { __u8 csum_valid:1; __u8 csum_complete_sw:1; __u8 csum_level:2; - __u8 csum_bad:1; + __u8 csum_not_inet:1; __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE @@ -818,7 +761,7 @@ struct sk_buff { unsigned char *head, *data; unsigned int truesize; - atomic_t users; + refcount_t users; }; #ifdef __KERNEL__ @@ -915,10 +858,34 @@ static inline bool skb_pkt_type_ok(u32 ptype) return ptype <= PACKET_OTHERHOST; } +static inline unsigned int skb_napi_id(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + return skb->napi_id; +#else + return 0; +#endif +} + +/* decrement the reference count and return true if we can free the skb */ +static inline bool skb_unref(struct sk_buff *skb) +{ + if (unlikely(!skb)) + return false; + if (likely(refcount_read(&skb->users) == 1)) + smp_rmb(); + else if (likely(!refcount_dec_and_test(&skb->users))) + return false; + + return true; +} + +void skb_release_head_state(struct sk_buff *skb); void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_tx_error(struct sk_buff *skb); void consume_skb(struct sk_buff *skb); +void consume_stateless_skb(struct sk_buff *skb); void __kfree_skb(struct sk_buff *skb); extern struct kmem_cache *skbuff_head_cache; @@ -948,7 +915,7 @@ struct sk_buff_fclones { struct sk_buff skb2; - atomic_t fclone_ref; + refcount_t fclone_ref; }; /** @@ -968,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && - atomic_read(&fclones->fclone_ref) > 1 && + refcount_read(&fclones->fclone_ref) > 1 && fclones->skb2.sk == sk; } @@ -1001,10 +968,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); -int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, - int offset, int len); -int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, - int len); +int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); int skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) consume_skb(a) @@ -1316,7 +1283,7 @@ static inline struct 
sk_buff *skb_queue_prev(const struct sk_buff_head *list, */ static inline struct sk_buff *skb_get(struct sk_buff *skb) { - atomic_inc(&skb->users); + refcount_inc(&skb->users); return skb; } @@ -1417,7 +1384,7 @@ static inline void __skb_header_release(struct sk_buff *skb) */ static inline int skb_shared(const struct sk_buff *skb) { - return atomic_read(&skb->users) != 1; + return refcount_read(&skb->users) != 1; } /** @@ -1926,41 +1893,87 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) /* * Add data to an sk_buff */ -unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); -unsigned char *skb_put(struct sk_buff *skb, unsigned int len); -static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) +void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); +void *skb_put(struct sk_buff *skb, unsigned int len); +static inline void *__skb_put(struct sk_buff *skb, unsigned int len) { - unsigned char *tmp = skb_tail_pointer(skb); + void *tmp = skb_tail_pointer(skb); SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; return tmp; } -unsigned char *skb_push(struct sk_buff *skb, unsigned int len); -static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) +static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len) +{ + void *tmp = __skb_put(skb, len); + + memset(tmp, 0, len); + return tmp; +} + +static inline void *__skb_put_data(struct sk_buff *skb, const void *data, + unsigned int len) +{ + void *tmp = __skb_put(skb, len); + + memcpy(tmp, data, len); + return tmp; +} + +static inline void __skb_put_u8(struct sk_buff *skb, u8 val) +{ + *(u8 *)__skb_put(skb, 1) = val; +} + +static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) +{ + void *tmp = skb_put(skb, len); + + memset(tmp, 0, len); + + return tmp; +} + +static inline void *skb_put_data(struct sk_buff *skb, const void *data, + unsigned int len) +{ + void *tmp = skb_put(skb, len); + + memcpy(tmp, data, len); + + return tmp; +} + +static inline void skb_put_u8(struct sk_buff *skb, u8 val) +{ + *(u8 *)skb_put(skb, 1) = val; +} + +void *skb_push(struct sk_buff *skb, unsigned int len); +static inline void *__skb_push(struct sk_buff *skb, unsigned int len) { skb->data -= len; skb->len += len; return skb->data; } -unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); -static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) +void *skb_pull(struct sk_buff *skb, unsigned int len); +static inline void *__skb_pull(struct sk_buff *skb, unsigned int len) { skb->len -= len; BUG_ON(skb->len < skb->data_len); return skb->data += len; } -static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) +static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) { return unlikely(len > skb->len) ? 
NULL : __skb_pull(skb, len); } -unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); +void *__pskb_pull_tail(struct sk_buff *skb, int delta); -static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) +static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && !__pskb_pull_tail(skb, len - skb_headlen(skb))) @@ -1969,7 +1982,7 @@ static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) return skb->data += len; } -static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) +static inline void *pskb_pull(struct sk_buff *skb, unsigned int len) { return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); } @@ -2193,6 +2206,11 @@ static inline int skb_mac_offset(const struct sk_buff *skb) return skb_mac_header(skb) - skb->data; } +static inline u32 skb_mac_header_len(const struct sk_buff *skb) +{ + return skb->network_header - skb->mac_header; +} + static inline int skb_mac_header_was_set(const struct sk_buff *skb) { return skb->mac_header != (typeof(skb->mac_header))~0U; @@ -2952,7 +2970,7 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb, __skb_postpush_rcsum(skb, start, len, 0); } -unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); /** * skb_push_rcsum - push skb and update receive checksum @@ -2965,8 +2983,7 @@ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); * that the checksum difference is zero (e.g., a valid IP header) * or you are setting ip_summed to CHECKSUM_NONE. */ -static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, - unsigned int len) +static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) { skb_push(skb, len); skb_postpush_rcsum(skb, skb->data, len); @@ -3056,6 +3073,13 @@ static inline void skb_frag_list_init(struct sk_buff *skb) int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, const struct sk_buff *skb); +struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, + struct sk_buff_head *queue, + unsigned int flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), + int *peeked, int *off, int *err, + struct sk_buff **last); struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, void (*destructor)(struct sock *sk, struct sk_buff *skb), @@ -3129,6 +3153,8 @@ struct skb_checksum_ops { __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); }; +extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; + __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum, const struct skb_checksum_ops *ops); __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, @@ -3298,13 +3324,6 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps); -static inline void sw_tx_timestamp(struct sk_buff *skb) -{ - if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && - !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - skb_tstamp_tx(skb, NULL); -} - /** * skb_tx_timestamp() - Driver hook for transmit timestamping * @@ -3320,7 +3339,8 @@ static inline void sw_tx_timestamp(struct sk_buff *skb) static inline void skb_tx_timestamp(struct sk_buff *skb) { skb_clone_tx_timestamp(skb); - sw_tx_timestamp(skb); + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) + skb_tstamp_tx(skb, NULL); } /** @@ -3386,21 +3406,6 @@ static inline void 
__skb_incr_checksum_unnecessary(struct sk_buff *skb) } } -static inline void __skb_mark_checksum_bad(struct sk_buff *skb) -{ - /* Mark current checksum as bad (typically called from GRO - * path). In the case that ip_summed is CHECKSUM_NONE - * this must be the first checksum encountered in the packet. - * When ip_summed is CHECKSUM_UNNECESSARY, this is the first - * checksum after the last one validated. For UDP, a zero - * checksum can not be marked as bad. - */ - - if (skb->ip_summed == CHECKSUM_NONE || - skb->ip_summed == CHECKSUM_UNNECESSARY) - skb->csum_bad = 1; -} - /* Check if we need to perform checksum complete validation. * * Returns true if checksum complete is needed, false otherwise @@ -3454,9 +3459,6 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, skb->csum_valid = 1; return 0; } - } else if (skb->csum_bad) { - /* ip_summed == CHECKSUM_NONE in this case */ - return (__force __sum16)1; } skb->csum = psum; @@ -3516,8 +3518,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) static inline bool __skb_checksum_convert_check(struct sk_buff *skb) { - return (skb->ip_summed == CHECKSUM_NONE && - skb->csum_valid && !skb->csum_bad); + return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); } static inline void __skb_checksum_convert(struct sk_buff *skb, @@ -3593,13 +3594,13 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) { - if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) + if (nf_bridge && refcount_dec_and_test(&nf_bridge->use)) kfree(nf_bridge); } static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) { if (nf_bridge) - atomic_inc(&nf_bridge->use); + refcount_inc(&nf_bridge->use); } #endif /* CONFIG_BRIDGE_NETFILTER */ static inline void nf_reset(struct sk_buff *skb) diff --git a/include/linux/socket.h b/include/linux/socket.h index 082027457825..8b13db5163cc 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -334,6 +334,7 @@ struct ucred { #define SOL_ALG 279 #define SOL_NFC 280 #define SOL_KCM 281 +#define SOL_TLS 282 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 3921cb9dfadb..108739ff9223 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -177,6 +177,7 @@ struct plat_stmmacenet_data { void (*fix_mac_speed)(void *priv, unsigned int speed); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); + struct mac_device_info *(*setup)(void *priv); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; @@ -185,6 +186,7 @@ struct plat_stmmacenet_data { struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; + bool has_sun8i; bool tso_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index 36eebc451b41..cebdf8745901 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h @@ -13,6 +13,7 @@ #define _LINUX_SUNRPC_AUTH_GSS_H #ifdef __KERNEL__ +#include <linux/refcount.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/gss_api.h> @@ -65,7 +66,7 @@ struct rpc_gss_init_res { * the wire when communicating with a server. 
*/ struct gss_cl_ctx { - atomic_t count; + refcount_t count; enum rpc_gss_proc gc_proc; u32 gc_seq; spinlock_t gc_seq_lock; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index b6d5adcee8fc..542ca1ae02c4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -123,7 +123,7 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; - struct skb_mstamp snt_synack; /* first SYNACK sent time */ + u64 snt_synack; /* first SYNACK sent time */ bool tfo_listener; u32 txhash; u32 rcv_isn; @@ -211,7 +211,7 @@ struct tcp_sock { /* Information of the most recently (s)acked skb */ struct tcp_rack { - struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u64 mstamp; /* (Re)sent time of the skb */ u32 rtt_us; /* Associated RTT */ u32 end_seq; /* Ending TCP sequence of the skb */ u8 advanced; /* mstamp advanced since last lost marking */ @@ -240,7 +240,7 @@ struct tcp_sock { u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ /* RTT measurement */ - struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */ + u64 tcp_mstamp; /* most recent packet received/sent */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ u32 mdev_us; /* medium deviation */ u32 mdev_max_us; /* maximal mdev for the last rtt period */ @@ -280,8 +280,8 @@ struct tcp_sock { u32 delivered; /* Total data packets delivered incl. rexmits */ u32 lost; /* Total data packets lost incl. rexmits */ u32 app_limited; /* limited until "delivered" reaches this val */ - struct skb_mstamp first_tx_mstamp; /* start of window send phase */ - struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */ + u64 first_tx_mstamp; /* start of window send phase */ + u64 delivered_mstamp; /* time we reached "delivered" */ u32 rate_delivered; /* saved rate sample: packets delivered */ u32 rate_interval_us; /* saved rate sample: time elapsed */ @@ -293,6 +293,8 @@ struct tcp_sock { u32 sacked_out; /* SACK'd packets */ u32 fackets_out; /* FACK'd packets */ + struct hrtimer pacing_timer; + /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; struct sk_buff *retransmit_skb_hint; @@ -333,16 +335,16 @@ struct tcp_sock { /* Receiver side RTT estimation */ struct { - u32 rtt_us; - u32 seq; - struct skb_mstamp time; + u32 rtt_us; + u32 seq; + u64 time; } rcv_rtt_est; /* Receiver queue space */ struct { - int space; - u32 seq; - struct skb_mstamp time; + int space; + u32 seq; + u64 time; } rcvq_space; /* TCP-specific MTU probe information. */ diff --git a/include/linux/udp.h b/include/linux/udp.h index 6cb4061a720d..eaea63bc79bb 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -80,6 +80,9 @@ struct udp_sock { struct sk_buff *skb, int nhoff); + /* udp_recvmsg try to use this before splicing sk_receive_queue */ + struct sk_buff_head reader_queue ____cacheline_aligned_in_smp; + /* This field is dirtied by udp_recvmsg() */ int forward_deficit; }; diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 00d232406f18..021f7a88f52c 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -117,6 +117,9 @@ struct cdc_ncm_ctx { u32 tx_curr_frame_num; u32 rx_max; u32 tx_max; + u32 tx_curr_size; + u32 tx_low_mem_max_cnt; + u32 tx_low_mem_val; u32 max_datagram_size; u16 tx_max_datagrams; u16 tx_remainder; |