-rw-r--r--  Documentation/devicetree/bindings/net/cpsw.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt | 27
-rw-r--r--  Documentation/networking/ieee802154.txt | 26
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  Makefile | 11
-rw-r--r--  arch/arm/boot/dts/hisi-x5hd2.dtsi | 6
-rw-r--r--  arch/powerpc/boot/Makefile | 3
-rw-r--r--  arch/powerpc/boot/opal.c | 2
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 4
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 9
-rw-r--r--  arch/powerpc/mm/hash64_4k.c | 2
-rw-r--r--  arch/powerpc/mm/hash64_64k.c | 4
-rw-r--r--  crypto/Makefile | 1
-rw-r--r--  crypto/drbg.c | 29
-rw-r--r--  drivers/atm/eni.c | 2
-rw-r--r--  drivers/atm/lanai.c | 1
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 4
-rw-r--r--  drivers/bluetooth/hci_h5.c | 4
-rw-r--r--  drivers/bluetooth/hci_qca.c | 9
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 5
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 11
-rw-r--r--  drivers/isdn/hisax/hfc4s8s_l1.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 175
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 178
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 7
-rw-r--r--  drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 34
-rw-r--r--  drivers/net/dummy.c | 3
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/alacritech/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/alacritech/Makefile | 4
-rw-r--r--  drivers/net/ethernet/alacritech/slic.h | 575
-rw-r--r--  drivers/net/ethernet/alacritech/slicoss.c | 1882
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 416
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 44
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 502
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 41
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 1573
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 346
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 93
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 17
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 3
-rw-r--r--  drivers/net/ethernet/ethoc.c | 44
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 352
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 76
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 51
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 85
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 51
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 485
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 46
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 51
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 45
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 82
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 6
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 238
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 2
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 233
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 80
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 16
-rw-r--r--  drivers/net/ieee802154/fakelb.c | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 1
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 4
-rw-r--r--  drivers/net/irda/irda-usb.c | 1
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 323
-rw-r--r--  drivers/net/macvlan.c | 3
-rw-r--r--  drivers/net/nlmon.c | 20
-rw-r--r--  drivers/net/phy/dp83848.c | 4
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/usb/lan78xx.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 19
-rw-r--r--  include/crypto/drbg.h | 2
-rw-r--r--  include/linux/bpf.h | 1
-rw-r--r--  include/linux/filter.h | 7
-rw-r--r--  include/linux/tcp.h | 12
-rw-r--r--  include/net/act_api.h | 2
-rw-r--r--  include/net/bluetooth/bluetooth.h | 25
-rw-r--r--  include/net/gen_stats.h | 17
-rw-r--r--  include/net/netfilter/xt_rateest.h | 10
-rw-r--r--  include/net/sch_generic.h | 2
-rw-r--r--  include/net/sock.h | 63
-rw-r--r--  include/uapi/linux/if.h | 4
-rw-r--r--  include/uapi/linux/netfilter/Kbuild | 1
-rw-r--r--  include/uapi/linux/pkt_cls.h | 1
-rw-r--r--  include/uapi/linux/tc_act/Kbuild | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_bpf.h | 1
-rw-r--r--  kernel/bpf/core.c | 65
-rw-r--r--  kernel/bpf/syscall.c | 28
-rw-r--r--  kernel/bpf/verifier.c | 14
-rw-r--r--  kernel/time/clocksource.c | 1
-rw-r--r--  mm/shmem.c | 15
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  mm/workingset.c | 2
-rw-r--r--  net/batman-adv/translation-table.c | 4
-rw-r--r--  net/bridge/br_sysfs_br.c | 1
-rw-r--r--  net/caif/caif_socket.c | 5
-rw-r--r--  net/core/datagram.c | 19
-rw-r--r--  net/core/filter.c | 15
-rw-r--r--  net/core/gen_estimator.c | 299
-rw-r--r--  net/core/gen_stats.c | 20
-rw-r--r--  net/dcb/dcbnl.c | 1
-rw-r--r--  net/ieee802154/nl-phy.c | 6
-rw-r--r--  net/ipv4/fib_trie.c | 68
-rw-r--r--  net/ipv4/ping.c | 4
-rw-r--r--  net/ipv4/tcp.c | 4
-rw-r--r--  net/ipv4/tcp_dctcp.c | 9
-rw-r--r--  net/ipv4/tcp_input.c | 22
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 91
-rw-r--r--  net/ipv4/tcp_timer.c | 4
-rw-r--r--  net/ipv6/route.c | 5
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/mpls/af_mpls.c | 2
-rw-r--r--  net/netfilter/xt_RATEEST.c | 4
-rw-r--r--  net/netfilter/xt_rateest.c | 28
-rw-r--r--  net/netlink/af_netlink.c | 32
-rw-r--r--  net/sched/act_api.c | 9
-rw-r--r--  net/sched/act_bpf.c | 9
-rw-r--r--  net/sched/act_police.c | 21
-rw-r--r--  net/sched/cls_bpf.c | 38
-rw-r--r--  net/sched/cls_flower.c | 5
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_cbq.c | 6
-rw-r--r--  net/sched/sch_drr.c | 6
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_hfsc.c | 6
-rw-r--r--  net/sched/sch_htb.c | 6
-rw-r--r--  net/sched/sch_qfq.c | 8
-rw-r--r--  sound/sparc/dbri.c | 3
-rwxr-xr-x  tools/hv/bondvf.sh | 4
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 194
176 files changed, 7841 insertions, 2252 deletions
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 5ad439f30135..ebda7c93453a 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -20,8 +20,6 @@ Required properties:
- slaves : Specifies number for slaves
- active_slave : Specifies the slave to use for time stamping,
ethtool and SIOCGMIIPHY
-- cpts_clock_mult : Numerator to convert input clock ticks into nanoseconds
-- cpts_clock_shift : Denominator to convert input clock ticks into nanoseconds
Optional properties:
- ti,hwmods : Must be "cpgmac0"
@@ -35,7 +33,11 @@ Optional properties:
For example in dra72x-evm, pcf gpio has to be
driven low so that cpsw slave 0 and phy data
lines are connected via mux.
-
+- cpts_clock_mult : Numerator to convert input clock ticks into nanoseconds
+- cpts_clock_shift : Denominator to convert input clock ticks into nanoseconds
+ Mult and shift will be calculated based on CPTS
+ rftclk frequency if both cpts_clock_shift and
+ cpts_clock_mult properties are not provided.
Slave Properties:
Required properties:
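The mult/shift pair described above parameterizes the kernel's standard
tick-to-nanosecond conversion. A minimal sketch, assuming the stock
clocksource helpers (clocks_calc_mult_shift() is the real kernel API;
the two wrapper functions here are illustrative only):

	#include <linux/clocksource.h>
	#include <linux/time.h>

	/* The conversion the two DT properties feed:
	 * ns = (ticks * mult) >> shift */
	static u64 cpts_ticks_to_ns(u64 ticks, u32 mult, u32 shift)
	{
		return (ticks * mult) >> shift;
	}

	/* When the properties are absent, the driver can derive the pair
	 * from the CPTS rftclk rate; maxsec (here 4s, an illustrative
	 * choice) bounds the overflow-free conversion range. */
	static void cpts_calc_clock_params(u32 rftclk_hz, u32 *mult, u32 *shift)
	{
		clocks_calc_mult_shift(mult, shift, rftclk_hz, NSEC_PER_SEC, 4);
	}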
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
index 75d398bb1fbb..063c02da018a 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
@@ -1,7 +1,12 @@
Hisilicon hix5hd2 gmac controller
Required properties:
-- compatible: should be "hisilicon,hix5hd2-gmac".
+- compatible: should contain one of the following SoC strings:
+ * "hisilicon,hix5hd2-gemac"
+ * "hisilicon,hi3798cv200-gemac"
+ and one of the following version strings:
+ * "hisilicon,hisi-gemac-v1"
+ * "hisilicon,hisi-gemac-v2"
- reg: specifies base physical address(s) and size of the device registers.
The first region is the MAC register base and size.
The second region is external interface control register.
@@ -12,6 +17,16 @@ Required properties:
- phy-handle: see ethernet.txt [1].
- mac-address: see ethernet.txt [1].
- clocks: clock phandle and specifier pair.
+- clock-names: contain the clock name "mac_core"(required) and "mac_ifc"(optional).
+- resets: should contain the phandle to the MAC core reset signal(optional),
+ the MAC interface reset signal(optional)
+ and the PHY reset signal(optional).
+- reset-names: contain the reset signal name "mac_core"(optional),
+ "mac_ifc"(optional) and "phy"(optional).
+- hisilicon,phy-reset-delays-us: triplet of delays if PHY reset signal given.
+ The 1st cell is reset pre-delay in micro seconds.
+ The 2nd cell is reset pulse in micro seconds.
+ The 3rd cell is reset post-delay in micro seconds.
- PHY subnode: inherits from phy binding [2]
@@ -20,15 +35,19 @@ Required properties:
Example:
gmac0: ethernet@f9840000 {
- compatible = "hisilicon,hix5hd2-gmac";
+ compatible = "hisilicon,hi3798cv200-gemac", "hisilicon,hisi-gemac-v2";
reg = <0xf9840000 0x1000>,<0xf984300c 0x4>;
interrupts = <0 71 4>;
#address-cells = <1>;
#size-cells = <0>;
- phy-mode = "mii";
+ phy-mode = "rgmii";
phy-handle = <&phy2>;
mac-address = [00 00 00 00 00 00];
- clocks = <&clock HIX5HD2_MAC0_CLK>;
+ clocks = <&crg HISTB_ETH0_MAC_CLK>, <&crg HISTB_ETH0_MACIF_CLK>;
+ clock-names = "mac_core", "mac_ifc";
+ resets = <&crg 0xcc 8>, <&crg 0xcc 10>, <&crg 0xcc 12>;
+ reset-names = "mac_core", "mac_ifc", "phy";
+ hisilicon,phy-reset-delays-us = <10000 10000 30000>;
phy2: ethernet-phy@2 {
reg = <2>;
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index aa69ccc481db..c4114346f054 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -4,20 +4,20 @@
Introduction
============
-The IEEE 802.15.4 working group focuses on standardization of bottom
-two layers: Medium Access Control (MAC) and Physical (PHY). And there
+The IEEE 802.15.4 working group focuses on standardization of the bottom
+two layers: Medium Access Control (MAC) and Physical access (PHY). And there
are mainly two options available for upper layers:
- ZigBee - proprietary protocol from the ZigBee Alliance
- 6LoWPAN - IPv6 networking over low rate personal area networks
-The linux-wpan project goal is to provide a complete implementation
+The goal of the Linux-wpan project is to provide a complete implementation
of the IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
of protocols for organizing Low-Rate Wireless Personal Area Networks.
The stack is composed of three main parts:
- IEEE 802.15.4 layer; We have chosen to use plain Berkeley socket API,
- the generic Linux networking stack to transfer IEEE 802.15.4 messages
- and a special protocol over genetlink for configuration/management
+ the generic Linux networking stack to transfer IEEE 802.15.4 data
+ messages and a special protocol over netlink for configuration/management
- MAC - provides access to shared channel and reliable data delivery
- PHY - represents device drivers
@@ -33,15 +33,13 @@ include/net/af_ieee802154.h header or in the special header
in the userspace package (see either http://wpan.cakelab.org/ or the
git tree at https://github.com/linux-wpan/wpan-tools).
-One can use SOCK_RAW for passing raw data towards device xmit function. YMMV.
-
Kernel side
=============
Like with WiFi, there are several types of devices implementing IEEE 802.15.4.
1) 'HardMAC'. The MAC layer is implemented in the device itself, the device
- exports MLME and data API.
+ exports a management (e.g. MLME) and data API.
2) 'SoftMAC' or just radio. These types of devices are just radio transceivers
possibly with some kinds of acceleration like automatic CRC computation and
comparation, automagic ACK handling, address matching, etc.
@@ -106,7 +104,7 @@ Fake drivers
In addition there is a driver available which simulates a real device with
SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
-provides possibility to test and debug stack without usage of real hardware.
+provides a way to test and debug the stack without using real hardware.
See sources in drivers/net/ieee802154 folder for more details.
@@ -125,17 +123,15 @@ to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
relatively large IPv6 and UDP headers down to (in the best case) several bytes.
-In Semptember 2011 the standard update was published - [RFC6282].
+In September 2011 the standard update was published - [RFC6282].
It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
used in this Linux implementation.
All the code related to 6lowpan you may find in files: net/6lowpan/*
and net/ieee802154/6lowpan/*
-To setup 6lowpan interface you need (busybox release > 1.17.0):
-1. Add IEEE802.15.4 interface and initialize PANid;
+To set up a 6LoWPAN interface you need:
+1. Add IEEE802.15.4 interface and set channel and PAN ID;
2. Add 6lowpan interface by command like:
# ip link add link wpan0 name lowpan0 type lowpan
-3. Set MAC (if needs):
- # ip link set lowpan0 address de:ad:be:ef:ca:fe:ba:be
-4. Bring up 'lowpan0' interface
+3. Bring up 'lowpan0' interface
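As a concrete illustration of the plain Berkeley socket API mentioned
above, a minimal user-space sketch (the AF_IEEE802154 fallback value
mirrors linux/socket.h; address binding is omitted for brevity):

	#include <sys/socket.h>
	#include <unistd.h>
	#include <stdio.h>

	#ifndef AF_IEEE802154
	#define AF_IEEE802154 36	/* matches linux/socket.h */
	#endif

	int main(void)
	{
		/* Datagram socket carrying IEEE 802.15.4 MAC payloads */
		int fd = socket(AF_IEEE802154, SOCK_DGRAM, 0);

		if (fd < 0) {
			perror("socket(AF_IEEE802154)");
			return 1;
		}
		close(fd);
		return 0;
	}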
diff --git a/MAINTAINERS b/MAINTAINERS
index e773ad5446e3..331f6af61731 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -577,6 +577,11 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/airspy/
+ALACRITECH GIGABIT ETHERNET DRIVER
+M: Lino Sanfilippo <LinoSanfilippo@gmx.de>
+S: Maintained
+F: drivers/net/ethernet/alacritech/*
+
ALCATEL SPEEDTOUCH USB DRIVER
M: Duncan Sands <duncan.sands@free.fr>
L: linux-usb@vger.kernel.org
@@ -6105,6 +6110,7 @@ F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring <aar@pengutronix.de>
+M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
diff --git a/Makefile b/Makefile
index 9f9c3b577c75..369099dc0fae 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
@@ -607,6 +607,13 @@ else
include/config/auto.conf: ;
endif # $(dot-config)
+# For the kernel to actually contain only the needed exported symbols,
+# we have to build modules as well to determine what those symbols are.
+# (this can be evaluated only once include/config/auto.conf has been included)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+ KBUILD_MODULES := 1
+endif
+
# The all: target is the default when no target is given on the
# command line.
# This allow a user to issue only 'make' to build a kernel including modules
@@ -944,7 +951,7 @@ ifdef CONFIG_GDB_SCRIPTS
endif
ifdef CONFIG_TRIM_UNUSED_KSYMS
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
- "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq"
+ "$(MAKE) -f $(srctree)/Makefile vmlinux"
endif
# standalone target for easier testing
diff --git a/arch/arm/boot/dts/hisi-x5hd2.dtsi b/arch/arm/boot/dts/hisi-x5hd2.dtsi
index fdcc23d203e5..0da76c5ff6d7 100644
--- a/arch/arm/boot/dts/hisi-x5hd2.dtsi
+++ b/arch/arm/boot/dts/hisi-x5hd2.dtsi
@@ -436,18 +436,20 @@
};
gmac0: ethernet@1840000 {
- compatible = "hisilicon,hix5hd2-gmac";
+ compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
reg = <0x1840000 0x1000>,<0x184300c 0x4>;
interrupts = <0 71 4>;
clocks = <&clock HIX5HD2_MAC0_CLK>;
+ clock-names = "mac_core";
status = "disabled";
};
gmac1: ethernet@1841000 {
- compatible = "hisilicon,hix5hd2-gmac";
+ compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
reg = <0x1841000 0x1000>,<0x1843010 0x4>;
interrupts = <0 72 4>;
clocks = <&clock HIX5HD2_MAC1_CLK>;
+ clock-names = "mac_core";
status = "disabled";
};
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index eae2dc8bc218..9d47f2efa830 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -100,7 +100,8 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
elf_util.c $(zlib-y) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
- uartlite.c mpc52xx-psc.c opal.c opal-calls.S
+ uartlite.c mpc52xx-psc.c opal.c
+src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
index d7b4fd47eb44..0272570d02de 100644
--- a/arch/powerpc/boot/opal.c
+++ b/arch/powerpc/boot/opal.c
@@ -13,7 +13,7 @@
#include <libfdt.h>
#include "../include/asm/opal-api.h"
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC64_BOOT_WRAPPER
/* Global OPAL struct used by opal-call.S */
struct opal {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index a62be72da274..5c31369435f2 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -671,8 +671,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
- if (rc)
+ if (rc) {
+ pci_unlock_rescan_remove();
return rc;
+ }
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8295f51c1a5f..7394b770ae1f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -94,8 +94,17 @@ SECTIONS
* detected, and will result in a crash at boot due to offsets being
* wrong.
*/
+#ifdef CONFIG_PPC64
+ /*
+ * BLOCK(0) overrides the default output section alignment because
+ * this needs to start right after .head.text in order for fixed
+ * section placement to work.
+ */
+ .text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) {
+#else
.text : AT(ADDR(.text) - LOAD_OFFSET) {
ALIGN_FUNCTION();
+#endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text .fixup __ftr_alt_* .ref.text)
SCHED_TEXT
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 42c702b3be1f..6fa450c12d6d 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
*/
rflags = htab_convert_pte_flags(new_pte);
- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 3bbbea07378c..1a68cb19b0e3 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -87,7 +87,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
subpg_pte = new_pte & ~subpg_prot;
rflags = htab_convert_pte_flags(subpg_pte);
- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
/*
@@ -258,7 +258,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
rflags = htab_convert_pte_flags(new_pte);
- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
diff --git a/crypto/Makefile b/crypto/Makefile
index 99cc64ac70ef..bd6a029094e6 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
+$(obj)/rsa_helper.o: $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.h
clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
clean-files += rsaprivkey-asn1.c rsaprivkey-asn1.h
diff --git a/crypto/drbg.c b/crypto/drbg.c
index fb33f7d3b052..053035b5c8f8 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -262,6 +262,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
#define DRBG_CTR_NULL_LEN 128
+#define DRBG_OUTSCRATCHLEN DRBG_CTR_NULL_LEN
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -1644,6 +1645,9 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
kfree(drbg->ctr_null_value_buf);
drbg->ctr_null_value = NULL;
+ kfree(drbg->outscratchpadbuf);
+ drbg->outscratchpadbuf = NULL;
+
return 0;
}
@@ -1708,6 +1712,15 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
alignmask + 1);
+ drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
+ GFP_KERNEL);
+ if (!drbg->outscratchpadbuf) {
+ drbg_fini_sym_kernel(drbg);
+ return -ENOMEM;
+ }
+ drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
+ alignmask + 1);
+
return alignmask;
}
@@ -1737,15 +1750,16 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *outbuf, u32 outlen)
{
struct scatterlist sg_in;
+ int ret;
sg_init_one(&sg_in, inbuf, inlen);
while (outlen) {
- u32 cryptlen = min_t(u32, inlen, outlen);
+ u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
struct scatterlist sg_out;
- int ret;
- sg_init_one(&sg_out, outbuf, cryptlen);
+ /* Output buffer may not be valid for SGL, use scratchpad */
+ sg_init_one(&sg_out, drbg->outscratchpad, cryptlen);
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
cryptlen, drbg->V);
ret = crypto_skcipher_encrypt(drbg->ctr_req);
@@ -1761,14 +1775,19 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
break;
}
default:
- return ret;
+ goto out;
}
init_completion(&drbg->ctr_completion);
+ memcpy(outbuf, drbg->outscratchpad, cryptlen);
+
outlen -= cryptlen;
}
+ ret = 0;
- return 0;
+out:
+ memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+ return ret;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
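The scratchpad change above exists because callers may hand the DRBG a
stack buffer, which cannot be mapped into a scatterlist. Condensed from
the patch, the bounce-buffer pattern looks like this (request setup and
error handling elided; the outbuf advance is assumed to live in context
lines not shown in the hunk):

	while (outlen) {
		u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);

		/* Encrypt into the page-backed scratchpad... */
		sg_init_one(&sg_out, drbg->outscratchpad, cryptlen);
		/* ...run crypto_skcipher_encrypt() as in the patch... */

		/* ...then copy to the caller's (possibly stack) buffer. */
		memcpy(outbuf, drbg->outscratchpad, cryptlen);
		outbuf += cryptlen;
		outlen -= cryptlen;
	}
	memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);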
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index f2aaf9e32a36..40c2d561417b 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1727,7 +1727,7 @@ static int eni_do_init(struct atm_dev *dev)
printk("\n");
printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
"mapping\n",dev->number);
- return error;
+ return -ENOMEM;
}
eni_dev->ioaddr = base;
eni_dev->base_diff = real_base - (unsigned long) base;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index ce43ae3e87b3..445505d9ea07 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2143,6 +2143,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
lanai->base = (bus_addr_t) ioremap(raw_base, LANAI_MAPPING_SIZE);
if (lanai->base == NULL) {
printk(KERN_ERR DEV_LABEL ": couldn't remap I/O space\n");
+ result = -ENOMEM;
goto error_pci;
}
/* 3.3: Reset lanai and PHY */
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index a2c921faaa12..910ec968f022 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -733,9 +733,7 @@ static int bcsp_open(struct hci_uart *hu)
skb_queue_head_init(&bcsp->rel);
skb_queue_head_init(&bcsp->unrel);
- init_timer(&bcsp->tbcsp);
- bcsp->tbcsp.function = bcsp_timed_event;
- bcsp->tbcsp.data = (u_long)hu;
+ setup_timer(&bcsp->tbcsp, bcsp_timed_event, (u_long)hu);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0879d64b1caf..90d0456b6744 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -204,9 +204,7 @@ static int h5_open(struct hci_uart *hu)
h5_reset_rx(h5);
- init_timer(&h5->timer);
- h5->timer.function = h5_timed_event;
- h5->timer.data = (unsigned long)hu;
+ setup_timer(&h5->timer, h5_timed_event, (unsigned long)hu);
h5->tx_win = H5_TX_WIN_MAX;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 6c867fbc56a7..05c230719a47 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -438,14 +438,11 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
- init_timer(&qca->wake_retrans_timer);
- qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
- qca->wake_retrans_timer.data = (u_long)hu;
+ setup_timer(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout,
+ (u_long)hu);
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
- init_timer(&qca->tx_idle_timer);
- qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
- qca->tx_idle_timer.data = (u_long)hu;
+ setup_timer(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, (u_long)hu);
qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
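The three setup_timer() conversions above are purely mechanical: in this
kernel generation the helper is equivalent to the open-coded triple it
replaces. Roughly (a sketch, not the actual macro expansion):

	#include <linux/timer.h>

	static inline void setup_timer_sketch(struct timer_list *t,
					      void (*fn)(unsigned long),
					      unsigned long data)
	{
		init_timer(t);
		t->function = fn;	/* timer callback */
		t->data = data;		/* opaque argument passed to fn */
	}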
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ec64fbcdeb49..199b0bb69b89 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -422,7 +422,7 @@ static inline void get_aes_decrypt_key(unsigned char *dec_key,
{
u32 temp;
u32 w_ring[MAX_NK];
- int i, j, k = 0;
+ int i, j, k;
u8 nr, nk;
switch (keylength) {
@@ -460,6 +460,7 @@ static inline void get_aes_decrypt_key(unsigned char *dec_key,
temp = w_ring[i % nk];
i++;
}
+ i--;
for (k = 0, j = i % nk; k < nk; k++) {
*((u32 *)dec_key + k) = htonl(w_ring[j]);
j--;
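The added i-- matters because the key-expansion loop increments i after
its final write, so the backward walk over w_ring must start at the last
slot actually written, not one past it. A stand-alone toy of the same
pattern (hypothetical code, not from the driver):

	int w_ring[4];	/* stand-in for the real MAX_NK ring */
	int i = 0;

	while (i < 7) {
		w_ring[i % 4] = i;
		i++;		/* leaves i one past the last write */
	}
	i--;			/* step back to the last written slot */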
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 02ca5dd978f6..6c343a933182 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -485,7 +485,6 @@ static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id,
*/
static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
{
- struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
acpi_handle dhandle, atpx_handle;
acpi_status status;
@@ -500,7 +499,6 @@ static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
}
amdgpu_atpx_priv.dhandle = dhandle;
amdgpu_atpx_priv.atpx.handle = atpx_handle;
- amdgpu_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
return true;
}
@@ -562,17 +560,25 @@ static bool amdgpu_atpx_detect(void)
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
+ bool d3_supported = false;
+ struct pci_dev *parent_pdev;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
if (has_atpx && vga_count == 2) {
@@ -580,6 +586,7 @@ static bool amdgpu_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
+ amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
amdgpu_atpx_init();
return true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b72324d..71bb2f8dc157 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2214,6 +2214,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t tmp;
int result;
bool error = false;
@@ -2233,8 +2234,10 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
offsetof(SMU74_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
- if (!result)
+ if (!result) {
+ data->soft_regs_start = tmp;
smu_data->smu7_data.soft_regs_start = tmp;
+ }
error |= (0 != result);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index fb6a418ce6be..e138fb51e8ce 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -375,7 +375,6 @@ static int hdlcd_drm_bind(struct device *dev)
err_fbdev:
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
drm_vblank_cleanup(drm);
err_vblank:
pm_runtime_disable(drm->dev);
@@ -387,6 +386,7 @@ err_unload:
drm_irq_uninstall(drm);
of_reserved_mem_device_release(drm->dev);
err_free:
+ drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0ad2c47f808f..71c3473476c7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -254,10 +254,12 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
req->value = dev->mode_config.async_page_flip;
break;
case DRM_CAP_PAGE_FLIP_TARGET:
- req->value = 1;
- drm_for_each_crtc(crtc, dev) {
- if (!crtc->funcs->page_flip_target)
- req->value = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ req->value = 1;
+ drm_for_each_crtc(crtc, dev) {
+ if (!crtc->funcs->page_flip_target)
+ req->value = 0;
+ }
}
break;
case DRM_CAP_CURSOR_WIDTH:
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 91ab7e9d6d2e..00eb4814b913 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2268,7 +2268,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto err_pages;
+ goto err_sg;
}
}
#ifdef CONFIG_SWIOTLB
@@ -2311,8 +2311,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
return 0;
-err_pages:
+err_sg:
sg_mark_end(sg);
+err_pages:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81c11499bcf0..3cb70d73239b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12260,7 +12260,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
- goto cleanup;
+ goto unlock;
}
atomic_inc(&intel_crtc->unpin_work_count);
@@ -12352,6 +12352,7 @@ cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+unlock:
mutex_unlock(&dev->struct_mutex);
cleanup:
crtc->primary->fb = old_fb;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 4129b12521a6..0ae13cd2adda 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -479,7 +479,6 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
*/
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
- struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
acpi_handle dhandle, atpx_handle;
acpi_status status;
@@ -493,7 +492,6 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
- radeon_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
return true;
}
@@ -555,11 +553,16 @@ static bool radeon_atpx_detect(void)
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
+ bool d3_supported = false;
+ struct pci_dev *parent_pdev;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
/* some newer PX laptops mark the dGPU as a non-VGA display device */
@@ -567,6 +570,9 @@ static bool radeon_atpx_detect(void)
vga_count++;
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
if (has_atpx && vga_count == 2) {
@@ -574,6 +580,7 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ radeon_atpx_priv.bridge_pm_usable = d3_supported;
radeon_atpx_init();
return true;
}
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 9600cd771f1a..e034ed847ff3 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1499,6 +1499,7 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_INFO
"HFC-4S/8S: failed to request address space at 0x%04x\n",
hw->iobase);
+ err = -EBUSY;
goto out;
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index ca453f3243cd..4da379f28d5d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -527,56 +527,18 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
{
- u16 val;
- int i, err;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
- val & ~GLOBAL_CONTROL_PPU_ENABLE);
- if (err)
- return err;
-
- for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
- if (err)
- return err;
-
- usleep_range(1000, 2000);
- if ((val & GLOBAL_STATUS_PPU_MASK) != GLOBAL_STATUS_PPU_POLLING)
- return 0;
- }
+ if (!chip->info->ops->ppu_disable)
+ return 0;
- return -ETIMEDOUT;
+ return chip->info->ops->ppu_disable(chip);
}
static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
{
- u16 val;
- int i, err;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
- val | GLOBAL_CONTROL_PPU_ENABLE);
- if (err)
- return err;
-
- for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
- if (err)
- return err;
-
- usleep_range(1000, 2000);
- if ((val & GLOBAL_STATUS_PPU_MASK) == GLOBAL_STATUS_PPU_POLLING)
- return 0;
- }
+ if (!chip->info->ops->ppu_enable)
+ return 0;
- return -ETIMEDOUT;
+ return chip->info->ops->ppu_enable(chip);
}
static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
@@ -2356,17 +2318,32 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
mutex_unlock(&chip->reg_lock);
}
-static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->reset)
+ return chip->info->ops->reset(chip);
+
+ return 0;
+}
+
+static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
{
- bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE);
- u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
struct gpio_desc *gpiod = chip->reset;
- unsigned long timeout;
- u16 reg;
- int err;
- int i;
- /* Set all ports to the disabled state. */
+ /* If there is a GPIO connected to the reset pin, toggle it */
+ if (gpiod) {
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+ usleep_range(10000, 20000);
+ }
+}
+
+static int mv88e6xxx_disable_ports(struct mv88e6xxx_chip *chip)
+{
+ int i, err;
+
+ /* Set all ports to the Disabled state */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
err = mv88e6xxx_port_set_state(chip, i,
PORT_CONTROL_STATE_DISABLED);
@@ -2374,45 +2351,25 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
return err;
}
- /* Wait for transmit queues to drain. */
+ /* Wait for transmit queues to drain,
+ * i.e. 2ms for a maximum frame to be transmitted at 10 Mbps.
+ */
usleep_range(2000, 4000);
- /* If there is a gpio connected to the reset pin, toggle it */
- if (gpiod) {
- gpiod_set_value_cansleep(gpiod, 1);
- usleep_range(10000, 20000);
- gpiod_set_value_cansleep(gpiod, 0);
- usleep_range(10000, 20000);
- }
+ return 0;
+}
- /* Reset the switch. Keep the PPU active if requested. The PPU
- * needs to be active to support indirect phy register access
- * through global registers 0x18 and 0x19.
- */
- if (ppu_active)
- err = mv88e6xxx_g1_write(chip, 0x04, 0xc000);
- else
- err = mv88e6xxx_g1_write(chip, 0x04, 0xc400);
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_disable_ports(chip);
if (err)
return err;
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, 0x00, &reg);
- if (err)
- return err;
+ mv88e6xxx_hardware_reset(chip);
- if ((reg & is_reset) == is_reset)
- break;
- usleep_range(1000, 2000);
- }
- if (time_after(jiffies, timeout))
- err = -ETIMEDOUT;
- else
- err = 0;
-
- return err;
+ return mv88e6xxx_software_reset(chip);
}
static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
@@ -2749,22 +2706,12 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
{
struct dsa_switch *ds = chip->ds;
u32 upstream_port = dsa_upstream_port(ds);
- u16 reg;
int err;
/* Enable the PHY Polling Unit if present, don't discard any packets,
* and mask all interrupt sources.
*/
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &reg);
- if (err < 0)
- return err;
-
- reg &= ~GLOBAL_CONTROL_PPU_ENABLE;
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) ||
- mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
- reg |= GLOBAL_CONTROL_PPU_ENABLE;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, reg);
+ err = mv88e6xxx_ppu_enable(chip);
if (err)
return err;
@@ -3226,6 +3173,9 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -3243,6 +3193,9 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6097_ops = {
@@ -3267,6 +3220,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6123_ops = {
@@ -3286,6 +3240,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6131_ops = {
@@ -3310,6 +3265,9 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -3334,6 +3292,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
@@ -3351,6 +3310,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -3376,6 +3336,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -3403,6 +3364,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -3428,6 +3390,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -3455,6 +3418,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -3475,6 +3439,9 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6190_ops = {
@@ -3499,6 +3466,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -3523,6 +3491,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -3547,6 +3516,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -3574,6 +3544,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -3598,6 +3569,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -3624,6 +3596,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -3649,6 +3622,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.stats_get_stats = mv88e6320_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -3674,6 +3648,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -3699,6 +3674,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -3726,6 +3702,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -3752,6 +3729,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -3778,6 +3756,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6391_ops = {
@@ -3802,6 +3781,7 @@ static const struct mv88e6xxx_ops mv88e6391_ops = {
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static int mv88e6xxx_verify_madatory_ops(struct mv88e6xxx_chip *chip,
@@ -3861,6 +3841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
.ops = &mv88e6097_ops,
},
@@ -4240,13 +4221,13 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
mv88e6xxx_ppu_state_init(chip);
}
static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
mv88e6xxx_ppu_state_destroy(chip);
}
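With PPU control and reset behind per-chip ops, supporting a model is a
matter of populating the right callbacks; chips that leave .ppu_enable
and .ppu_disable NULL simply skip PPU handling, since the dispatchers
return 0 when an op is absent. A hypothetical entry for a 6185-family
part (mv88e6xxx_example_ops is illustrative, not a real chip):

	static const struct mv88e6xxx_ops mv88e6xxx_example_ops = {
		/* ...phy, stats and monitor ops as for the family... */
		.ppu_enable = mv88e6185_g1_ppu_enable,
		.ppu_disable = mv88e6185_g1_ppu_disable,
		.reset = mv88e6185_g1_reset,
	};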
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 44136ee015c3..75af86a7fad8 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -33,6 +33,184 @@ int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask);
}
+/* Offset 0x00: Switch Global Status Register */
+
+static int mv88e6185_g1_wait_ppu_disabled(struct mv88e6xxx_chip *chip)
+{
+ u16 state;
+ int i, err;
+
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ if (err)
+ return err;
+
+ /* Check the value of the PPUState bits 15:14 */
+ state &= GLOBAL_STATUS_PPU_STATE_MASK;
+ if (state != GLOBAL_STATUS_PPU_STATE_POLLING)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6185_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
+{
+ u16 state;
+ int i, err;
+
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ if (err)
+ return err;
+
+ /* Check the value of the PPUState bits 15:14 */
+ state &= GLOBAL_STATUS_PPU_STATE_MASK;
+ if (state == GLOBAL_STATUS_PPU_STATE_POLLING)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6352_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
+{
+ u16 state;
+ int i, err;
+
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ if (err)
+ return err;
+
+ /* Check the value of the PPUState (or InitState) bit 15 */
+ if (state & GLOBAL_STATUS_PPU_STATE)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ u16 val;
+ int err;
+
+ /* Wait up to 1 second for the switch to be ready. The InitReady bit 11
+ * is set to a one when all units inside the device (ATU, VTU, etc.)
+ * have finished their initialization and are ready to accept frames.
+ */
+ while (time_before(jiffies, timeout)) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
+
+ if (val & GLOBAL_STATUS_INIT_READY)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Offset 0x04: Switch Global Control Register */
+
+int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ /* Set the SWReset bit 15 along with the PPUEn bit 14, to also restart
+ * the PPU, including re-doing PHY detection and initialization
+ */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val |= GLOBAL_CONTROL_SW_RESET;
+ val |= GLOBAL_CONTROL_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_wait_init_ready(chip);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ /* Set the SWReset bit 15 */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val |= GLOBAL_CONTROL_SW_RESET;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_wait_init_ready(chip);
+ if (err)
+ return err;
+
+ return mv88e6352_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val |= GLOBAL_CONTROL_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val &= ~GLOBAL_CONTROL_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_disabled(chip);
+}
+
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index cb61378829e6..1aec7382c02d 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -19,6 +19,13 @@
int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+
+int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+
+int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+
int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 13c7cc443454..af54baea47cf 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -193,12 +193,12 @@
#define GLOBAL_STATUS 0x00
#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
-/* Two bits for 6165, 6185 etc */
-#define GLOBAL_STATUS_PPU_MASK (0x3 << 14)
-#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14)
-#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 << 14)
-#define GLOBAL_STATUS_PPU_DISABLED (0x2 << 14)
-#define GLOBAL_STATUS_PPU_POLLING (0x3 << 14)
+#define GLOBAL_STATUS_PPU_STATE_MASK (0x3 << 14) /* 6165 6185 */
+#define GLOBAL_STATUS_PPU_STATE_DISABLED_RST (0x0 << 14)
+#define GLOBAL_STATUS_PPU_STATE_INITIALIZING (0x1 << 14)
+#define GLOBAL_STATUS_PPU_STATE_DISABLED (0x2 << 14)
+#define GLOBAL_STATUS_PPU_STATE_POLLING (0x3 << 14)
+#define GLOBAL_STATUS_INIT_READY BIT(11)
#define GLOBAL_STATUS_IRQ_AVB 8
#define GLOBAL_STATUS_IRQ_DEVICE 7
#define GLOBAL_STATUS_IRQ_STATS 6
@@ -490,12 +490,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */
MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
- /* PHY Polling Unit.
- * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
- */
- MV88E6XXX_CAP_PPU,
- MV88E6XXX_CAP_PPU_ACTIVE,
-
/* Per VLAN Spanning Tree Unit (STU).
* The Port State database, if present, is accessed through VTU
* operations and dedicated SID registers. See GLOBAL_VTU_SID.
@@ -537,8 +531,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
-#define MV88E6XXX_FLAG_PPU BIT_ULL(MV88E6XXX_CAP_PPU)
-#define MV88E6XXX_FLAG_PPU_ACTIVE BIT_ULL(MV88E6XXX_CAP_PPU_ACTIVE)
#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP)
#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
@@ -567,7 +559,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_MULTI_CHIP)
@@ -578,7 +569,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
@@ -605,7 +595,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_VTU)
#define MV88E6XXX_FLAGS_FAMILY_6320 \
@@ -614,7 +603,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
@@ -630,7 +618,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
@@ -647,7 +634,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
@@ -662,7 +648,6 @@ struct mv88e6xxx_ops;
#define MV88E6XXX_FLAGS_FAMILY_6390 \
(MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
@@ -792,6 +777,13 @@ struct mv88e6xxx_ops {
int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 val);
+ /* PHY Polling Unit (PPU) operations */
+ int (*ppu_enable)(struct mv88e6xxx_chip *chip);
+ int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+
+ /* Switch Software Reset */
+ int (*reset)(struct mv88e6xxx_chip *chip);
+
/* RGMII Receive/Transmit Timing Control
* Add delay on PHY_INTERFACE_MODE_RGMII_*ID, no delay otherwise.
*/
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 69fc8409a973..6421835f11b7 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -154,6 +154,9 @@ static void dummy_setup(struct net_device *dev)
dev->hw_features |= dev->features;
dev->hw_enc_features |= dev->features;
eth_hw_addr_random(dev);
+
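+	/* accept any MTU from 0 up to ETH_MAX_MTU (64 KiB - 1) */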
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
}
static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6738fbb357cd..6e16e441f85e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -21,6 +21,7 @@ source "drivers/net/ethernet/3com/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
source "drivers/net/ethernet/altera/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index e76244521a2e..24330f4885a9 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
obj-$(CONFIG_ALTERA_TSE) += altera/
diff --git a/drivers/net/ethernet/alacritech/Kconfig b/drivers/net/ethernet/alacritech/Kconfig
new file mode 100644
index 000000000000..09496e18cdc5
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/Kconfig
@@ -0,0 +1,28 @@
+config NET_VENDOR_ALACRITECH
+ bool "Alacritech devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about Alacritech devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_ALACRITECH
+
+config SLICOSS
+ tristate "Alacritech Slicoss support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver supports Gigabit Ethernet adapters based on the
+ Session Layer Interface (SLIC) technology by Alacritech.
+
+ Supported are Mojave (1 port) and Oasis (1, 2 and 4 port) cards,
+ both copper and fiber.
+
+ To compile this driver as a module, choose M here: the module
+	  will be called slicoss. Compiling it as a module is recommended.
+
+endif # NET_VENDOR_ALACRITECH
diff --git a/drivers/net/ethernet/alacritech/Makefile b/drivers/net/ethernet/alacritech/Makefile
new file mode 100644
index 000000000000..8790e9ed8496
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the Alacritech Slicoss driver
+#
+obj-$(CONFIG_SLICOSS) += slicoss.o
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
new file mode 100644
index 000000000000..08931b4afc96
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -0,0 +1,575 @@
+
+#ifndef _SLIC_H
+#define _SLIC_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock_types.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/u64_stats_sync.h>
+
+#define SLIC_VGBSTAT_XPERR 0x40000000
+#define SLIC_VGBSTAT_XERRSHFT 25
+#define SLIC_VGBSTAT_XCSERR 0x23
+#define SLIC_VGBSTAT_XUFLOW 0x22
+#define SLIC_VGBSTAT_XHLEN 0x20
+#define SLIC_VGBSTAT_NETERR 0x01000000
+#define SLIC_VGBSTAT_NERRSHFT 16
+#define SLIC_VGBSTAT_NERRMSK 0x1ff
+#define SLIC_VGBSTAT_NCSERR 0x103
+#define SLIC_VGBSTAT_NUFLOW 0x102
+#define SLIC_VGBSTAT_NHLEN 0x100
+#define SLIC_VGBSTAT_LNKERR 0x00000080
+#define SLIC_VGBSTAT_LERRMSK 0xff
+#define SLIC_VGBSTAT_LDEARLY 0x86
+#define SLIC_VGBSTAT_LBOFLO 0x85
+#define SLIC_VGBSTAT_LCODERR 0x84
+#define SLIC_VGBSTAT_LDBLNBL 0x83
+#define SLIC_VGBSTAT_LCRCERR 0x82
+#define SLIC_VGBSTAT_LOFLO 0x81
+#define SLIC_VGBSTAT_LUFLO 0x80
+
+#define SLIC_IRHDDR_FLEN_MSK 0x0000ffff
+#define SLIC_IRHDDR_SVALID 0x80000000
+#define SLIC_IRHDDR_ERR 0x10000000
+
+#define SLIC_VRHSTAT_802OE 0x80000000
+#define SLIC_VRHSTAT_TPOFLO 0x10000000
+#define SLIC_VRHSTATB_802UE 0x80000000
+#define SLIC_VRHSTATB_RCVE 0x40000000
+#define SLIC_VRHSTATB_BUFF 0x20000000
+#define SLIC_VRHSTATB_CARRE 0x08000000
+#define SLIC_VRHSTATB_LONGE 0x02000000
+#define SLIC_VRHSTATB_PREA 0x01000000
+#define SLIC_VRHSTATB_CRC 0x00800000
+#define SLIC_VRHSTATB_DRBL 0x00400000
+#define SLIC_VRHSTATB_CODE 0x00200000
+#define SLIC_VRHSTATB_TPCSUM 0x00100000
+#define SLIC_VRHSTATB_TPHLEN 0x00080000
+#define SLIC_VRHSTATB_IPCSUM 0x00040000
+#define SLIC_VRHSTATB_IPLERR 0x00020000
+#define SLIC_VRHSTATB_IPHERR 0x00010000
+
+#define SLIC_CMD_XMT_REQ 0x01
+#define SLIC_CMD_TYPE_DUMB 3
+
+#define SLIC_RESET_MAGIC 0xDEAD
+#define SLIC_ICR_INT_OFF 0
+#define SLIC_ICR_INT_ON 1
+#define SLIC_ICR_INT_MASK 2
+
+#define SLIC_ISR_ERR 0x80000000
+#define SLIC_ISR_RCV 0x40000000
+#define SLIC_ISR_CMD 0x20000000
+#define SLIC_ISR_IO 0x60000000
+#define SLIC_ISR_UPC 0x10000000
+#define SLIC_ISR_LEVENT 0x08000000
+#define SLIC_ISR_RMISS 0x02000000
+#define SLIC_ISR_UPCERR 0x01000000
+#define SLIC_ISR_XDROP 0x00800000
+#define SLIC_ISR_UPCBSY 0x00020000
+
+#define SLIC_ISR_PING_MASK 0x00700000
+#define SLIC_ISR_UPCERR_MASK (SLIC_ISR_UPCERR | SLIC_ISR_UPCBSY)
+#define SLIC_ISR_UPC_MASK (SLIC_ISR_UPC | SLIC_ISR_UPCERR_MASK)
+#define SLIC_WCS_START 0x80000000
+#define SLIC_WCS_COMPARE 0x40000000
+#define SLIC_RCVWCS_BEGIN 0x40000000
+#define SLIC_RCVWCS_FINISH 0x80000000
+
+#define SLIC_MIICR_REG_16 0x00100000
+#define SLIC_MRV_REG16_XOVERON 0x0068
+
+#define SLIC_GIG_LINKUP 0x0001
+#define SLIC_GIG_FULLDUPLEX 0x0002
+#define SLIC_GIG_SPEED_MASK 0x000C
+#define SLIC_GIG_SPEED_1000 0x0008
+#define SLIC_GIG_SPEED_100 0x0004
+#define SLIC_GIG_SPEED_10 0x0000
+
+#define SLIC_GMCR_RESET 0x80000000
+#define SLIC_GMCR_GBIT 0x20000000
+#define SLIC_GMCR_FULLD 0x10000000
+#define SLIC_GMCR_GAPBB_SHIFT 14
+#define SLIC_GMCR_GAPR1_SHIFT 7
+#define SLIC_GMCR_GAPR2_SHIFT 0
+#define SLIC_GMCR_GAPBB_1000 0x60
+#define SLIC_GMCR_GAPR1_1000 0x2C
+#define SLIC_GMCR_GAPR2_1000 0x40
+#define SLIC_GMCR_GAPBB_100 0x70
+#define SLIC_GMCR_GAPR1_100 0x2C
+#define SLIC_GMCR_GAPR2_100 0x40
+
+#define SLIC_XCR_RESET 0x80000000
+#define SLIC_XCR_XMTEN 0x40000000
+#define SLIC_XCR_PAUSEEN 0x20000000
+#define SLIC_XCR_LOADRNG 0x10000000
+
+#define SLIC_GXCR_RESET 0x80000000
+#define SLIC_GXCR_XMTEN 0x40000000
+#define SLIC_GXCR_PAUSEEN 0x20000000
+
+#define SLIC_GRCR_RESET 0x80000000
+#define SLIC_GRCR_RCVEN 0x40000000
+#define SLIC_GRCR_RCVALL 0x20000000
+#define SLIC_GRCR_RCVBAD 0x10000000
+#define SLIC_GRCR_CTLEN 0x08000000
+#define SLIC_GRCR_ADDRAEN 0x02000000
+#define SLIC_GRCR_HASHSIZE_SHIFT 17
+#define SLIC_GRCR_HASHSIZE 14
+
+/* Reset Register */
+#define SLIC_REG_RESET 0x0000
+/* Interrupt Control Register */
+#define SLIC_REG_ICR 0x0008
+/* Interrupt status pointer */
+#define SLIC_REG_ISP 0x0010
+/* Interrupt status */
+#define SLIC_REG_ISR 0x0018
+/* Header buffer address reg
+ * 31-8 - phy addr of set of contiguous hdr buffers
+ * 7-0 - number of buffers passed
+ * Buffers are 256 bytes long on 256-byte boundaries.
+ */
+#define SLIC_REG_HBAR 0x0020
+/* Data buffer handle & address reg
+ * 4 sets of registers; Buffers are 2K bytes long 2 per 4K page.
+ */
+#define SLIC_REG_DBAR 0x0028
+/* Xmt Cmd buf addr regs.
+ * 1 per XMT interface
+ * 31-5 - phy addr of host command buffer
+ * 4-0 - length of cmd in multiples of 32 bytes
+ * Buffers are 32 bytes up to 512 bytes long
+ */
+#define SLIC_REG_CBAR 0x0030
+/* Write control store */
+#define SLIC_REG_WCS 0x0034
+/* Response buffer address reg.
+ * 31-8 - phy addr of set of contiguous response buffers
+ * 7-0 - number of buffers passed
+ * Buffers are 32 bytes long on 32-byte boundaries.
+ */
+#define SLIC_REG_RBAR 0x0038
+/* Read statistics (UPR) */
+#define SLIC_REG_RSTAT 0x0040
+/* Read link status */
+#define SLIC_REG_LSTAT 0x0048
+/* Write Mac Config */
+#define SLIC_REG_WMCFG 0x0050
+/* Write phy register */
+#define SLIC_REG_WPHY 0x0058
+/* Rcv Cmd buf addr reg */
+#define SLIC_REG_RCBAR 0x0060
+/* Read SLIC Config */
+#define SLIC_REG_RCONFIG 0x0068
+/* Interrupt aggregation time */
+#define SLIC_REG_INTAGG 0x0070
+/* Write XMIT config reg */
+#define SLIC_REG_WXCFG 0x0078
+/* Write RCV config reg */
+#define SLIC_REG_WRCFG 0x0080
+/* Write rcv addr a low */
+#define SLIC_REG_WRADDRAL 0x0088
+/* Write rcv addr a high */
+#define SLIC_REG_WRADDRAH 0x0090
+/* Write rcv addr b low */
+#define SLIC_REG_WRADDRBL 0x0098
+/* Write rcv addr b high */
+#define SLIC_REG_WRADDRBH 0x00a0
+/* Low bits of mcast mask */
+#define SLIC_REG_MCASTLOW 0x00a8
+/* High bits of mcast mask */
+#define SLIC_REG_MCASTHIGH 0x00b0
+/* Ping the card */
+#define SLIC_REG_PING 0x00b8
+/* Dump command */
+#define SLIC_REG_DUMP_CMD 0x00c0
+/* Dump data pointer */
+#define SLIC_REG_DUMP_DATA 0x00c8
+/* Read card's pci_status register */
+#define SLIC_REG_PCISTATUS 0x00d0
+/* Write hostid field */
+#define SLIC_REG_WRHOSTID 0x00d8
+/* Put card in a low power state */
+#define SLIC_REG_LOW_POWER 0x00e0
+/* Force slic into quiescent state before soft reset */
+#define SLIC_REG_QUIESCE 0x00e8
+/* Reset interface queues */
+#define SLIC_REG_RESET_IFACE 0x00f0
+/* Register is only written when it has changed.
+ * Bits 63-32 for host i/f addrs.
+ */
+#define SLIC_REG_ADDR_UPPER 0x00f8
+/* 64 bit Header buffer address reg */
+#define SLIC_REG_HBAR64 0x0100
+/* 64 bit Data buffer handle & address reg */
+#define SLIC_REG_DBAR64 0x0108
+/* 64 bit Xmt Cmd buf addr regs. */
+#define SLIC_REG_CBAR64 0x0110
+/* 64 bit Response buffer address reg.*/
+#define SLIC_REG_RBAR64 0x0118
+/* 64 bit Rcv Cmd buf addr reg*/
+#define SLIC_REG_RCBAR64 0x0120
+/* Read statistics (64 bit UPR) */
+#define SLIC_REG_RSTAT64 0x0128
+/* Download Gigabit RCV sequencer ucode */
+#define SLIC_REG_RCV_WCS 0x0130
+/* Write VlanId field */
+#define SLIC_REG_WRVLANID 0x0138
+/* Read Transformer info */
+#define SLIC_REG_READ_XF_INFO 0x0140
+/* Write Transformer info */
+#define SLIC_REG_WRITE_XF_INFO 0x0148
+/* Write card ticks per second */
+#define SLIC_REG_TICKS_PER_SEC 0x0170
+#define SLIC_REG_HOSTID 0x1554
+
+#define PCI_VENDOR_ID_ALACRITECH 0x139A
+#define PCI_DEVICE_ID_ALACRITECH_MOJAVE 0x0005
+#define PCI_SUBDEVICE_ID_ALACRITECH_1000X1 0x0005
+#define PCI_SUBDEVICE_ID_ALACRITECH_1000X1_2 0x0006
+#define PCI_SUBDEVICE_ID_ALACRITECH_1000X1F 0x0007
+#define PCI_SUBDEVICE_ID_ALACRITECH_CICADA 0x0008
+#define PCI_SUBDEVICE_ID_ALACRITECH_SES1001T 0x2006
+#define PCI_SUBDEVICE_ID_ALACRITECH_SES1001F 0x2007
+#define PCI_DEVICE_ID_ALACRITECH_OASIS 0x0007
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XT 0x000B
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF 0x000C
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XT 0x000D
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF 0x000E
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF 0x000F
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2104ET 0x0010
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF 0x0011
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2102ET 0x0012
+
+/* Note: the number of descriptors must be a power of two */
+#define SLIC_NUM_RX_LES 256
+#define SLIC_RX_BUFF_SIZE 2048
+#define SLIC_RX_BUFF_ALIGN 256
+#define SLIC_RX_BUFF_HDR_SIZE 34
+#define SLIC_MAX_REQ_RX_DESCS 1
+
+#define SLIC_NUM_TX_DESCS 256
+#define SLIC_TX_DESC_ALIGN 32
+#define SLIC_MIN_TX_WAKEUP_DESCS 10
+#define SLIC_MAX_REQ_TX_DESCS 1
+#define SLIC_MAX_TX_COMPLETIONS 100
+
+#define SLIC_NUM_STAT_DESCS 128
+#define SLIC_STATS_DESC_ALIGN 256
+
+#define SLIC_NUM_STAT_DESC_ARRAYS 4
+#define SLIC_INVALID_STAT_DESC_IDX 0xffffffff
+
+#define SLIC_NAPI_WEIGHT 64
+
+#define SLIC_UPR_LSTAT 0
+#define SLIC_UPR_CONFIG 1
+
+#define SLIC_EEPROM_SIZE 128
+#define SLIC_EEPROM_MAGIC 0xa5a5
+
+#define SLIC_FIRMWARE_MOJAVE "slicoss/gbdownload.sys"
+#define SLIC_FIRMWARE_OASIS "slicoss/oasisdownload.sys"
+#define SLIC_RCV_FIRMWARE_MOJAVE "slicoss/gbrcvucode.sys"
+#define SLIC_RCV_FIRMWARE_OASIS "slicoss/oasisrcvucode.sys"
+#define SLIC_FIRMWARE_MIN_SIZE 64
+#define SLIC_FIRMWARE_MAX_SECTIONS 3
+
+#define SLIC_MODEL_MOJAVE 0
+#define SLIC_MODEL_OASIS 1
+
+#define SLIC_INC_STATS_COUNTER(st, counter) \
+do { \
+ u64_stats_update_begin(&(st)->syncp); \
+ (st)->counter++; \
+ u64_stats_update_end(&(st)->syncp); \
+} while (0)
+
+#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
+{ \
+ unsigned int start; \
+ do { \
+ start = u64_stats_fetch_begin_irq(&(st)->syncp); \
+ newst = (st)->counter; \
+ } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \
+}
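+
+/* Usage sketch for the two helpers above: a writer bumps a counter
+ * under the u64_stats sequence, e.g.
+ *
+ *	SLIC_INC_STATS_COUNTER(&sdev->stats, rx_errors);
+ *
+ * and a reader takes a consistent snapshot, retrying if a writer
+ * updated the sequence in between:
+ *
+ *	u64 errs;
+ *	SLIC_GET_STATS_COUNTER(errs, &sdev->stats, rx_errors);
+ */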
+
+struct slic_upr {
+ dma_addr_t paddr;
+ unsigned int type;
+ struct list_head list;
+};
+
+struct slic_upr_list {
+ bool pending;
+ struct list_head list;
+ /* upr list lock */
+ spinlock_t lock;
+};
+
+/* SLIC EEPROM structure for Mojave */
+struct slic_mojave_eeprom {
+ __le16 id; /* 00 EEPROM/FLASH Magic code 'A5A5'*/
+ __le16 eeprom_code_size;/* 01 Size of EEPROM Codes (bytes * 4)*/
+ __le16 flash_size; /* 02 Flash size */
+ __le16 eeprom_size; /* 03 EEPROM Size */
+ __le16 vendor_id; /* 04 Vendor ID */
+ __le16 dev_id; /* 05 Device ID */
+ u8 rev_id; /* 06 Revision ID */
+ u8 class_code[3]; /* 07 Class Code */
+ u8 irqpin_dbg; /* 08 Debug Interrupt pin */
+ u8 irqpin; /* Network Interrupt Pin */
+ u8 min_grant; /* 09 Minimum grant */
+ u8 max_lat; /* Maximum Latency */
+ __le16 pci_stat; /* 10 PCI Status */
+ __le16 sub_vendor_id; /* 11 Subsystem Vendor Id */
+ __le16 sub_id; /* 12 Subsystem ID */
+ __le16 dev_id_dbg; /* 13 Debug Device Id */
+ __le16 ramrom; /* 14 Dram/Rom function */
+ __le16 dram_size2pci; /* 15 DRAM size to PCI (bytes * 64K) */
+ __le16 rom_size2pci; /* 16 ROM extension size to PCI (bytes * 4k) */
+ u8 pad[2]; /* 17 Padding */
+ u8 freetime; /* 18 FreeTime setting */
+ u8 ifctrl; /* 10-bit interface control (Mojave only) */
+ __le16 dram_size; /* 19 DRAM size (bytes * 64k) */
+ u8 mac[ETH_ALEN]; /* 20 MAC addresses */
+ u8 mac2[ETH_ALEN];
+ u8 pad2[6];
+ u16 dev_id2; /* Device ID for 2nd PCI function */
+ u8 irqpin2; /* Interrupt pin for 2nd PCI function */
+ u8 class_code2[3]; /* Class Code for 2nd PCI function */
+ u16 cfg_byte6; /* Config Byte 6 */
+ u16 pme_cap; /* Power Mgment capabilities */
+ u16 nwclk_ctrl; /* NetworkClockControls */
+ u8 fru_format; /* Alacritech FRU format type */
+ u8 fru_assembly[6]; /* Alacritech FRU information */
+ u8 fru_rev[2];
+ u8 fru_serial[14];
+ u8 fru_pad[3];
+ u8 oem_fru[28]; /* optional OEM FRU format type */
+ u8 pad3[4]; /* Pad to 128 bytes - includes 2 cksum bytes
+ * (if OEM FRU info exists) and two unusable
+ * bytes at the end
+ */
+};
+
+/* SLIC EEPROM structure for Oasis */
+struct slic_oasis_eeprom {
+ __le16 id; /* 00 EEPROM/FLASH Magic code 'A5A5' */
+ __le16 eeprom_code_size;/* 01 Size of EEPROM Codes (bytes * 4)*/
+ __le16 spidev0_cfg; /* 02 Flash Config for SPI device 0 */
+ __le16 spidev1_cfg; /* 03 Flash Config for SPI device 1 */
+ __le16 vendor_id; /* 04 Vendor ID */
+ __le16 dev_id; /* 05 Device ID (function 0) */
+ u8 rev_id; /* 06 Revision ID */
+ u8 class_code0[3]; /* 07 Class Code for PCI function 0 */
+ u8 irqpin1; /* 08 Interrupt pin for PCI function 1*/
+ u8 class_code1[3]; /* 09 Class Code for PCI function 1 */
+ u8 irqpin2; /* 10 Interrupt pin for PCI function 2*/
+ u8 irqpin0; /* Interrupt pin for PCI function 0*/
+ u8 min_grant; /* 11 Minimum grant */
+ u8 max_lat; /* Maximum Latency */
+ __le16 sub_vendor_id; /* 12 Subsystem Vendor Id */
+ __le16 sub_id; /* 13 Subsystem ID */
+ __le16 flash_size; /* 14 Flash size (bytes / 4K) */
+ __le16 dram_size2pci; /* 15 DRAM size to PCI (bytes / 64K) */
+ __le16 rom_size2pci; /* 16 Flash (ROM extension) size to PCI
+ * (bytes / 4K)
+ */
+ __le16 dev_id1; /* 17 Device Id (function 1) */
+ __le16 dev_id2; /* 18 Device Id (function 2) */
+ __le16 dev_stat_cfg; /* 19 Device Status Config Bytes 6-7 */
+ __le16 pme_cap; /* 20 Power Mgment capabilities */
+ u8 msi_cap; /* 21 MSI capabilities */
+ u8 clock_div; /* Clock divider */
+ __le16 pci_stat_lo; /* 22 PCI Status bits 15:0 */
+ __le16 pci_stat_hi; /* 23 PCI Status bits 31:16 */
+ __le16 dram_cfg_lo; /* 24 DRAM Configuration bits 15:0 */
+ __le16 dram_cfg_hi; /* 25 DRAM Configuration bits 31:16 */
+ __le16 dram_size; /* 26 DRAM size (bytes / 64K) */
+ __le16 gpio_tbi_ctrl; /* 27 GPIO/TBI controls for functions 1/0 */
+ __le16 eeprom_size; /* 28 EEPROM Size */
+ u8 mac[ETH_ALEN]; /* 29 MAC addresses (2 ports) */
+ u8 mac2[ETH_ALEN];
+ u8 fru_format; /* 35 Alacritech FRU format type */
+ u8 fru_assembly[6]; /* Alacritech FRU information */
+ u8 fru_rev[2];
+ u8 fru_serial[14];
+ u8 fru_pad[3];
+ u8 oem_fru[28]; /* optional OEM FRU information */
+ u8 pad[4]; /* Pad to 128 bytes - includes 2 checksum bytes
+ * (if OEM FRU info exists) and two unusable
+ * bytes at the end
+ */
+};
+
+struct slic_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_mcasts;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ /* HW STATS */
+ u64 rx_buff_miss;
+ u64 tx_dropped;
+ u64 irq_errs;
+ /* transport layer */
+ u64 rx_tpcsum;
+ u64 rx_tpoflow;
+ u64 rx_tphlen;
+ /* ip layer */
+ u64 rx_ipcsum;
+ u64 rx_iplen;
+ u64 rx_iphlen;
+ /* link layer */
+ u64 rx_early;
+ u64 rx_buffoflow;
+ u64 rx_lcode;
+ u64 rx_drbl;
+ u64 rx_crc;
+ u64 rx_oflow802;
+ u64 rx_uflow802;
+ /* oasis only */
+ u64 tx_carrier;
+ struct u64_stats_sync syncp;
+};
+
+struct slic_shmem_data {
+ __le32 isr;
+ __le32 link;
+};
+
+struct slic_shmem {
+ dma_addr_t isr_paddr;
+ dma_addr_t link_paddr;
+ struct slic_shmem_data *shmem_data;
+};
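+
+/* Both DMA handles point into the same coherent slic_shmem_data
+ * allocation: isr_paddr at the start of the block, link_paddr at the
+ * offset of the link member (see slic_init_shmem()).
+ */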
+
+struct slic_rx_info_oasis {
+ __le32 frame_status;
+ __le32 frame_status_b;
+ __le32 time_stamp;
+ __le32 checksum;
+};
+
+struct slic_rx_info_mojave {
+ __le32 frame_status;
+ __le16 byte_cnt;
+ __le16 tp_chksum;
+ __le16 ctx_hash;
+ __le16 mac_hash;
+ __le16 buff_lnk;
+};
+
+struct slic_stat_desc {
+ __le32 hnd;
+ __u8 pad[8];
+ __le32 status;
+ __u8 pad2[16];
+};
+
+struct slic_stat_queue {
+ struct slic_stat_desc *descs[SLIC_NUM_STAT_DESC_ARRAYS];
+ dma_addr_t paddr[SLIC_NUM_STAT_DESC_ARRAYS];
+ unsigned int addr_offset[SLIC_NUM_STAT_DESC_ARRAYS];
+ unsigned int active_array;
+ unsigned int len;
+ unsigned int done_idx;
+ size_t mem_size;
+};
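+
+/* The status queue rotates through SLIC_NUM_STAT_DESC_ARRAYS descriptor
+ * arrays: whenever done_idx wraps, the just-consumed array is handed
+ * back to the card via SLIC_REG_RBAR and active_array advances (see
+ * slic_next_compl_idx()).
+ */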
+
+struct slic_tx_desc {
+ __le32 hnd;
+ __le32 rsvd;
+ u8 cmd;
+ u8 flags;
+ __le16 rsvd2;
+ __le32 totlen;
+ __le32 paddrl;
+ __le32 paddrh;
+ __le32 len;
+ __le32 type;
+};
+
+struct slic_tx_buffer {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(map_addr);
+ DEFINE_DMA_UNMAP_LEN(map_len);
+ struct slic_tx_desc *desc;
+ dma_addr_t desc_paddr;
+};
+
+struct slic_tx_queue {
+ struct dma_pool *dma_pool;
+ struct slic_tx_buffer *txbuffs;
+ unsigned int len;
+ unsigned int put_idx;
+ unsigned int done_idx;
+};
+
+struct slic_rx_desc {
+ u8 pad[16];
+ __le32 buffer;
+ __le32 length;
+ __le32 status;
+};
+
+struct slic_rx_buffer {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(map_addr);
+ DEFINE_DMA_UNMAP_LEN(map_len);
+ unsigned int addr_offset;
+};
+
+struct slic_rx_queue {
+ struct slic_rx_buffer *rxbuffs;
+ unsigned int len;
+ unsigned int done_idx;
+ unsigned int put_idx;
+};
+
+struct slic_device {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ void __iomem *regs;
+ /* upper address setting lock */
+ spinlock_t upper_lock;
+ struct slic_shmem shmem;
+ struct napi_struct napi;
+ struct slic_rx_queue rxq;
+ struct slic_tx_queue txq;
+ struct slic_stat_queue stq;
+ struct slic_stats stats;
+ struct slic_upr_list upr_list;
+ /* link configuration lock */
+ spinlock_t link_lock;
+ bool promisc;
+ int speed;
+ unsigned int duplex;
+ bool is_fiber;
+ unsigned char model;
+};
+
+static inline u32 slic_read(struct slic_device *sdev, unsigned int reg)
+{
+ return ioread32(sdev->regs + reg);
+}
+
+static inline void slic_write(struct slic_device *sdev, unsigned int reg,
+ u32 val)
+{
+ iowrite32(val, sdev->regs + reg);
+}
+
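+/* Reading any register back forces posted PCI writes out to the
+ * adapter; SLIC_REG_HOSTID appears to be chosen here because reading
+ * it is harmless.
+ */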
+static inline void slic_flush_write(struct slic_device *sdev)
+{
+ (void)ioread32(sdev->regs + SLIC_REG_HOSTID);
+}
+
+#endif /* _SLIC_H */
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
new file mode 100644
index 000000000000..e77ecd5b307c
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -0,0 +1,1882 @@
+/*
+ * Driver for Gigabit Ethernet adapters based on the Session Layer
+ * Interface (SLIC) technology by Alacritech. The driver does not
+ * support the hardware acceleration features provided by these cards.
+ *
+ * Copyright (C) 2016 Lino Sanfilippo <LinoSanfilippo@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/u64_stats_sync.h>
+
+#include "slic.h"
+
+#define DRV_NAME "slicoss"
+#define DRV_VERSION "1.0"
+
+static const struct pci_device_id slic_id_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
+ PCI_DEVICE_ID_ALACRITECH_MOJAVE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
+ PCI_DEVICE_ID_ALACRITECH_OASIS) },
+ { 0 }
+};
+
+static const char slic_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_multicasts",
+ "rx_errors",
+ "rx_buff_miss",
+ "rx_tp_csum",
+ "rx_tp_oflow",
+ "rx_tp_hlen",
+ "rx_ip_csum",
+ "rx_ip_len",
+ "rx_ip_hdr_len",
+ "rx_early",
+ "rx_buff_oflow",
+ "rx_lcode",
+ "rx_drbl",
+ "rx_crc",
+ "rx_oflow_802",
+ "rx_uflow_802",
+ "tx_packets",
+ "tx_bytes",
+ "tx_carrier",
+ "tx_dropped",
+ "irq_errs",
+};
+
+static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
+{
+ return (idx + 1) & (qlen - 1);
+}
+
+static inline int slic_get_free_queue_descs(unsigned int put_idx,
+ unsigned int done_idx,
+ unsigned int qlen)
+{
+ if (put_idx >= done_idx)
+ return (qlen - (put_idx - done_idx) - 1);
+ return (done_idx - put_idx - 1);
+}
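+
+/* Example: with qlen = 8, put_idx = 6 and done_idx = 2 there are
+ * 8 - (6 - 2) - 1 = 3 free descriptors; one slot is always kept
+ * unused so that a full ring can be told apart from an empty one.
+ */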
+
+static unsigned int slic_next_compl_idx(struct slic_device *sdev)
+{
+ struct slic_stat_queue *stq = &sdev->stq;
+ unsigned int active = stq->active_array;
+ struct slic_stat_desc *descs;
+ struct slic_stat_desc *stat;
+ unsigned int idx;
+
+ descs = stq->descs[active];
+ stat = &descs[stq->done_idx];
+
+ if (!stat->status)
+ return SLIC_INVALID_STAT_DESC_IDX;
+
+ idx = (le32_to_cpu(stat->hnd) & 0xffff) - 1;
+ /* reset desc */
+ stat->hnd = 0;
+ stat->status = 0;
+
+ stq->done_idx = slic_next_queue_idx(stq->done_idx, stq->len);
+ /* check for wraparound */
+ if (!stq->done_idx) {
+ dma_addr_t paddr = stq->paddr[active];
+
+ slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
+ stq->len);
+ /* make sure new status descriptors are immediately available */
+ slic_flush_write(sdev);
+ active++;
+ active &= (SLIC_NUM_STAT_DESC_ARRAYS - 1);
+ stq->active_array = active;
+ }
+ return idx;
+}
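+
+/* The low 16 bits of the status descriptor handle carry the one-based
+ * handle that slic_init_tx_queue() stores in each tx descriptor
+ * (desc->hnd = i + 1), hence the subtraction of one above to recover
+ * the zero-based tx queue index.
+ */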
+
+static unsigned int slic_get_free_tx_descs(struct slic_tx_queue *txq)
+{
+ /* ensure tail idx is updated */
+ smp_mb();
+ return slic_get_free_queue_descs(txq->put_idx, txq->done_idx, txq->len);
+}
+
+static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
+{
+ return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
+}
+
+static void slic_clear_upr_list(struct slic_upr_list *upr_list)
+{
+ struct slic_upr *upr;
+ struct slic_upr *tmp;
+
+ spin_lock_bh(&upr_list->lock);
+ list_for_each_entry_safe(upr, tmp, &upr_list->list, list) {
+ list_del(&upr->list);
+ kfree(upr);
+ }
+ upr_list->pending = false;
+ spin_unlock_bh(&upr_list->lock);
+}
+
+static void slic_start_upr(struct slic_device *sdev, struct slic_upr *upr)
+{
+ u32 reg;
+
+ reg = (upr->type == SLIC_UPR_CONFIG) ? SLIC_REG_RCONFIG :
+ SLIC_REG_LSTAT;
+ slic_write(sdev, reg, lower_32_bits(upr->paddr));
+ slic_flush_write(sdev);
+}
+
+static void slic_queue_upr(struct slic_device *sdev, struct slic_upr *upr)
+{
+ struct slic_upr_list *upr_list = &sdev->upr_list;
+ bool pending;
+
+ spin_lock_bh(&upr_list->lock);
+ pending = upr_list->pending;
+ INIT_LIST_HEAD(&upr->list);
+ list_add_tail(&upr->list, &upr_list->list);
+ upr_list->pending = true;
+ spin_unlock_bh(&upr_list->lock);
+
+ if (!pending)
+ slic_start_upr(sdev, upr);
+}
+
+static struct slic_upr *slic_dequeue_upr(struct slic_device *sdev)
+{
+ struct slic_upr_list *upr_list = &sdev->upr_list;
+ struct slic_upr *next_upr = NULL;
+ struct slic_upr *upr = NULL;
+
+ spin_lock_bh(&upr_list->lock);
+ if (!list_empty(&upr_list->list)) {
+ upr = list_first_entry(&upr_list->list, struct slic_upr, list);
+ list_del(&upr->list);
+
+ if (list_empty(&upr_list->list))
+ upr_list->pending = false;
+ else
+ next_upr = list_first_entry(&upr_list->list,
+ struct slic_upr, list);
+ }
+ spin_unlock_bh(&upr_list->lock);
+ /* trigger processing of the next upr in list */
+ if (next_upr)
+ slic_start_upr(sdev, next_upr);
+
+ return upr;
+}
+
+static int slic_new_upr(struct slic_device *sdev, unsigned int type,
+ dma_addr_t paddr)
+{
+ struct slic_upr *upr;
+
+ upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
+ if (!upr)
+ return -ENOMEM;
+ upr->type = type;
+ upr->paddr = paddr;
+
+ slic_queue_upr(sdev, upr);
+
+ return 0;
+}
+
+static void slic_set_mcast_bit(u64 *mcmask, unsigned char const *addr)
+{
+ u64 mask = *mcmask;
+ u8 crc;
+ /* Get the CRC polynomial for the mac address: we use bits 1-8 (lsb),
+ * bitwise reversed, msb (= lsb bit 0 before bitrev) is automatically
+ * discarded.
+ */
+ crc = ether_crc(ETH_ALEN, addr) >> 23;
+ /* we only have space on the SLIC for 64 entries */
+ crc &= 0x3F;
+ mask |= (u64)1 << crc;
+ *mcmask = mask;
+}
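+
+/* In effect only bits 23-28 of the 32-bit Ethernet CRC survive (the
+ * shift by 23 followed by the & 0x3F), selecting one of the 64
+ * multicast hash bits the hardware matches incoming addresses against.
+ */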
+
+/* must be called with link_lock held */
+static void slic_configure_rcv(struct slic_device *sdev)
+{
+ u32 val;
+
+ val = SLIC_GRCR_RESET | SLIC_GRCR_ADDRAEN | SLIC_GRCR_RCVEN |
+ SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT | SLIC_GRCR_RCVBAD;
+
+ if (sdev->duplex == DUPLEX_FULL)
+ val |= SLIC_GRCR_CTLEN;
+
+ if (sdev->promisc)
+ val |= SLIC_GRCR_RCVALL;
+
+ slic_write(sdev, SLIC_REG_WRCFG, val);
+}
+
+/* must be called with link_lock held */
+static void slic_configure_xmt(struct slic_device *sdev)
+{
+ u32 val;
+
+ val = SLIC_GXCR_RESET | SLIC_GXCR_XMTEN;
+
+ if (sdev->duplex == DUPLEX_FULL)
+ val |= SLIC_GXCR_PAUSEEN;
+
+ slic_write(sdev, SLIC_REG_WXCFG, val);
+}
+
+/* must be called with link_lock held */
+static void slic_configure_mac(struct slic_device *sdev)
+{
+ u32 val;
+
+ if (sdev->speed == SPEED_1000) {
+ val = SLIC_GMCR_GAPBB_1000 << SLIC_GMCR_GAPBB_SHIFT |
+ SLIC_GMCR_GAPR1_1000 << SLIC_GMCR_GAPR1_SHIFT |
+ SLIC_GMCR_GAPR2_1000 << SLIC_GMCR_GAPR2_SHIFT |
+ SLIC_GMCR_GBIT; /* enable GMII */
+ } else {
+ val = SLIC_GMCR_GAPBB_100 << SLIC_GMCR_GAPBB_SHIFT |
+ SLIC_GMCR_GAPR1_100 << SLIC_GMCR_GAPR1_SHIFT |
+ SLIC_GMCR_GAPR2_100 << SLIC_GMCR_GAPR2_SHIFT;
+ }
+
+ if (sdev->duplex == DUPLEX_FULL)
+ val |= SLIC_GMCR_FULLD;
+
+ slic_write(sdev, SLIC_REG_WMCFG, val);
+}
+
+static void slic_configure_link_locked(struct slic_device *sdev, int speed,
+ unsigned int duplex)
+{
+ struct net_device *dev = sdev->netdev;
+
+ if (sdev->speed == speed && sdev->duplex == duplex)
+ return;
+
+ sdev->speed = speed;
+ sdev->duplex = duplex;
+
+ if (sdev->speed == SPEED_UNKNOWN) {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ } else {
+ /* (re)configure link settings */
+ slic_configure_mac(sdev);
+ slic_configure_xmt(sdev);
+ slic_configure_rcv(sdev);
+ slic_flush_write(sdev);
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ }
+}
+
+static void slic_configure_link(struct slic_device *sdev, int speed,
+ unsigned int duplex)
+{
+ spin_lock_bh(&sdev->link_lock);
+ slic_configure_link_locked(sdev, speed, duplex);
+ spin_unlock_bh(&sdev->link_lock);
+}
+
+static void slic_set_rx_mode(struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct netdev_hw_addr *hwaddr;
+ bool set_promisc;
+ u64 mcmask;
+
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ /* Turn on all multicast addresses. We have to do this for
+ * promiscuous mode as well as ALLMCAST mode (it saves the
+ * microcode from having to keep state about the MAC
+ * configuration).
+ */
+ mcmask = ~(u64)0;
+ } else {
+ mcmask = 0;
+
+ netdev_for_each_mc_addr(hwaddr, dev) {
+ slic_set_mcast_bit(&mcmask, hwaddr->addr);
+ }
+ }
+
+ slic_write(sdev, SLIC_REG_MCASTLOW, lower_32_bits(mcmask));
+ slic_write(sdev, SLIC_REG_MCASTHIGH, upper_32_bits(mcmask));
+
+ set_promisc = !!(dev->flags & IFF_PROMISC);
+
+ spin_lock_bh(&sdev->link_lock);
+ if (sdev->promisc != set_promisc) {
+ sdev->promisc = set_promisc;
+ slic_configure_rcv(sdev);
+		/* make sure writes to the receiver can't leak out of the lock */
+ mmiowb();
+ }
+ spin_unlock_bh(&sdev->link_lock);
+}
+
+static void slic_xmit_complete(struct slic_device *sdev)
+{
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct net_device *dev = sdev->netdev;
+ unsigned int idx = txq->done_idx;
+ struct slic_tx_buffer *buff;
+ unsigned int frames = 0;
+ unsigned int bytes = 0;
+
+	/* Limit processing to SLIC_MAX_TX_COMPLETIONS frames so that new
+	 * completions arriving during processing cannot keep the loop
+	 * running endlessly.
+	 */
+ do {
+ idx = slic_next_compl_idx(sdev);
+ if (idx == SLIC_INVALID_STAT_DESC_IDX)
+ break;
+
+ txq->done_idx = idx;
+ buff = &txq->txbuffs[idx];
+
+ if (unlikely(!buff->skb)) {
+ netdev_warn(dev,
+ "no skb found for desc idx %i\n", idx);
+ continue;
+ }
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
+
+ bytes += buff->skb->len;
+ frames++;
+
+ dev_kfree_skb_any(buff->skb);
+ buff->skb = NULL;
+ } while (frames < SLIC_MAX_TX_COMPLETIONS);
+ /* make sure xmit sees the new value for done_idx */
+ smp_wmb();
+
+ u64_stats_update_begin(&sdev->stats.syncp);
+ sdev->stats.tx_bytes += bytes;
+ sdev->stats.tx_packets += frames;
+ u64_stats_update_end(&sdev->stats.syncp);
+
+ netif_tx_lock(dev);
+ if (netif_queue_stopped(dev) &&
+ (slic_get_free_tx_descs(txq) >= SLIC_MIN_TX_WAKEUP_DESCS))
+ netif_wake_queue(dev);
+ netif_tx_unlock(dev);
+}
+
+static void slic_refill_rx_queue(struct slic_device *sdev, gfp_t gfp)
+{
+ const unsigned int ALIGN_MASK = SLIC_RX_BUFF_ALIGN - 1;
+ unsigned int maplen = SLIC_RX_BUFF_SIZE;
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct net_device *dev = sdev->netdev;
+ struct slic_rx_buffer *buff;
+ struct slic_rx_desc *desc;
+ unsigned int misalign;
+ unsigned int offset;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
+ skb = alloc_skb(maplen + ALIGN_MASK, gfp);
+ if (!skb)
+ break;
+
+ paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
+ netdev_err(dev, "mapping rx packet failed\n");
+ /* drop skb */
+ dev_kfree_skb_any(skb);
+ break;
+ }
+		/* ensure head buffer descriptors are 256-byte aligned */
+ offset = 0;
+ misalign = paddr & ALIGN_MASK;
+ if (misalign) {
+ offset = SLIC_RX_BUFF_ALIGN - misalign;
+ skb_reserve(skb, offset);
+ }
+ /* the HW expects dma chunks for descriptor + frame data */
+ desc = (struct slic_rx_desc *)skb->data;
+ /* temporarily sync descriptor for CPU to clear status */
+ dma_sync_single_for_cpu(&sdev->pdev->dev, paddr,
+ offset + sizeof(*desc),
+ DMA_FROM_DEVICE);
+ desc->status = 0;
+ /* return it to HW again */
+ dma_sync_single_for_device(&sdev->pdev->dev, paddr,
+ offset + sizeof(*desc),
+ DMA_FROM_DEVICE);
+
+ buff = &rxq->rxbuffs[rxq->put_idx];
+ buff->skb = skb;
+ dma_unmap_addr_set(buff, map_addr, paddr);
+ dma_unmap_len_set(buff, map_len, maplen);
+ buff->addr_offset = offset;
+ /* complete write to descriptor before it is handed to HW */
+ wmb();
+ /* head buffer descriptors are placed immediately before skb */
+ slic_write(sdev, SLIC_REG_HBAR, lower_32_bits(paddr) + offset);
+ rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
+ }
+}
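+
+/* The alignment fixup above matches the SLIC_REG_HBAR layout (bits
+ * 31-8 carry the physical buffer address), and it lets the card place
+ * its receive header descriptor immediately in front of the frame
+ * data within the same buffer.
+ */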
+
+static void slic_handle_frame_error(struct slic_device *sdev,
+ struct sk_buff *skb)
+{
+ struct slic_stats *stats = &sdev->stats;
+
+ if (sdev->model == SLIC_MODEL_OASIS) {
+ struct slic_rx_info_oasis *info;
+ u32 status_b;
+ u32 status;
+
+ info = (struct slic_rx_info_oasis *)skb->data;
+ status = le32_to_cpu(info->frame_status);
+ status_b = le32_to_cpu(info->frame_status_b);
+ /* transport layer */
+ if (status_b & SLIC_VRHSTATB_TPCSUM)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
+ if (status & SLIC_VRHSTAT_TPOFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
+ if (status_b & SLIC_VRHSTATB_TPHLEN)
+ SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
+ /* ip layer */
+ if (status_b & SLIC_VRHSTATB_IPCSUM)
+ SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
+ if (status_b & SLIC_VRHSTATB_IPLERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_iplen);
+ if (status_b & SLIC_VRHSTATB_IPHERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
+ /* link layer */
+ if (status_b & SLIC_VRHSTATB_RCVE)
+ SLIC_INC_STATS_COUNTER(stats, rx_early);
+ if (status_b & SLIC_VRHSTATB_BUFF)
+ SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
+ if (status_b & SLIC_VRHSTATB_CODE)
+ SLIC_INC_STATS_COUNTER(stats, rx_lcode);
+ if (status_b & SLIC_VRHSTATB_DRBL)
+ SLIC_INC_STATS_COUNTER(stats, rx_drbl);
+ if (status_b & SLIC_VRHSTATB_CRC)
+ SLIC_INC_STATS_COUNTER(stats, rx_crc);
+ if (status & SLIC_VRHSTAT_802OE)
+ SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
+ if (status_b & SLIC_VRHSTATB_802UE)
+ SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
+ if (status_b & SLIC_VRHSTATB_CARRE)
+ SLIC_INC_STATS_COUNTER(stats, tx_carrier);
+ } else { /* mojave */
+ struct slic_rx_info_mojave *info;
+ u32 status;
+
+ info = (struct slic_rx_info_mojave *)skb->data;
+ status = le32_to_cpu(info->frame_status);
+ /* transport layer */
+ if (status & SLIC_VGBSTAT_XPERR) {
+ u32 xerr = status >> SLIC_VGBSTAT_XERRSHFT;
+
+ if (xerr == SLIC_VGBSTAT_XCSERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
+ if (xerr == SLIC_VGBSTAT_XUFLOW)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
+ if (xerr == SLIC_VGBSTAT_XHLEN)
+ SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
+ }
+ /* ip layer */
+ if (status & SLIC_VGBSTAT_NETERR) {
+ u32 nerr = status >> SLIC_VGBSTAT_NERRSHFT &
+ SLIC_VGBSTAT_NERRMSK;
+
+ if (nerr == SLIC_VGBSTAT_NCSERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
+ if (nerr == SLIC_VGBSTAT_NUFLOW)
+ SLIC_INC_STATS_COUNTER(stats, rx_iplen);
+ if (nerr == SLIC_VGBSTAT_NHLEN)
+ SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
+ }
+ /* link layer */
+ if (status & SLIC_VGBSTAT_LNKERR) {
+ u32 lerr = status & SLIC_VGBSTAT_LERRMSK;
+
+ if (lerr == SLIC_VGBSTAT_LDEARLY)
+ SLIC_INC_STATS_COUNTER(stats, rx_early);
+ if (lerr == SLIC_VGBSTAT_LBOFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
+ if (lerr == SLIC_VGBSTAT_LCODERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_lcode);
+ if (lerr == SLIC_VGBSTAT_LDBLNBL)
+ SLIC_INC_STATS_COUNTER(stats, rx_drbl);
+ if (lerr == SLIC_VGBSTAT_LCRCERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_crc);
+ if (lerr == SLIC_VGBSTAT_LOFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
+ if (lerr == SLIC_VGBSTAT_LUFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
+ }
+ }
+ SLIC_INC_STATS_COUNTER(stats, rx_errors);
+}
+
+static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
+ unsigned int *done)
+{
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct net_device *dev = sdev->netdev;
+ struct slic_rx_buffer *buff;
+ struct slic_rx_desc *desc;
+ unsigned int frames = 0;
+ unsigned int bytes = 0;
+ struct sk_buff *skb;
+ u32 status;
+ u32 len;
+
+ while (todo && (rxq->done_idx != rxq->put_idx)) {
+ buff = &rxq->rxbuffs[rxq->done_idx];
+
+ skb = buff->skb;
+ if (!skb)
+ break;
+
+ desc = (struct slic_rx_desc *)skb->data;
+
+ dma_sync_single_for_cpu(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ buff->addr_offset + sizeof(*desc),
+ DMA_FROM_DEVICE);
+
+ status = le32_to_cpu(desc->status);
+ if (!(status & SLIC_IRHDDR_SVALID)) {
+ dma_sync_single_for_device(&sdev->pdev->dev,
+ dma_unmap_addr(buff,
+ map_addr),
+ buff->addr_offset +
+ sizeof(*desc),
+ DMA_FROM_DEVICE);
+ break;
+ }
+
+ buff->skb = NULL;
+
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len),
+ DMA_FROM_DEVICE);
+
+ /* skip rx descriptor that is placed before the frame data */
+ skb_reserve(skb, SLIC_RX_BUFF_HDR_SIZE);
+
+ if (unlikely(status & SLIC_IRHDDR_ERR)) {
+ slic_handle_frame_error(sdev, skb);
+ dev_kfree_skb_any(skb);
+ } else {
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+
+ if (is_multicast_ether_addr(eh->h_dest))
+ SLIC_INC_STATS_COUNTER(&sdev->stats, rx_mcasts);
+
+ len = le32_to_cpu(desc->length) & SLIC_IRHDDR_FLEN_MSK;
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&sdev->napi, skb);
+
+ bytes += len;
+ frames++;
+ }
+ rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
+ todo--;
+ }
+
+ u64_stats_update_begin(&sdev->stats.syncp);
+ sdev->stats.rx_bytes += bytes;
+ sdev->stats.rx_packets += frames;
+ u64_stats_update_end(&sdev->stats.syncp);
+
+ slic_refill_rx_queue(sdev, GFP_ATOMIC);
+}
+
+static void slic_handle_link_irq(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ unsigned int duplex;
+ int speed;
+ u32 link;
+
+ link = le32_to_cpu(sm_data->link);
+
+ if (link & SLIC_GIG_LINKUP) {
+ if (link & SLIC_GIG_SPEED_1000)
+ speed = SPEED_1000;
+ else if (link & SLIC_GIG_SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ duplex = (link & SLIC_GIG_FULLDUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+ } else {
+ duplex = DUPLEX_UNKNOWN;
+ speed = SPEED_UNKNOWN;
+ }
+ slic_configure_link(sdev, speed, duplex);
+}
+
+static void slic_handle_upr_irq(struct slic_device *sdev, u32 irqs)
+{
+ struct slic_upr *upr;
+
+ /* remove upr that caused this irq (always the first entry in list) */
+ upr = slic_dequeue_upr(sdev);
+ if (!upr) {
+ netdev_warn(sdev->netdev, "no upr found on list\n");
+ return;
+ }
+
+ if (upr->type == SLIC_UPR_LSTAT) {
+ if (unlikely(irqs & SLIC_ISR_UPCERR_MASK)) {
+ /* try again */
+ slic_queue_upr(sdev, upr);
+ return;
+ }
+ slic_handle_link_irq(sdev);
+ }
+ kfree(upr);
+}
+
+static int slic_handle_link_change(struct slic_device *sdev)
+{
+ return slic_new_upr(sdev, SLIC_UPR_LSTAT, sdev->shmem.link_paddr);
+}
+
+static void slic_handle_err_irq(struct slic_device *sdev, u32 isr)
+{
+ struct slic_stats *stats = &sdev->stats;
+
+ if (isr & SLIC_ISR_RMISS)
+ SLIC_INC_STATS_COUNTER(stats, rx_buff_miss);
+ if (isr & SLIC_ISR_XDROP)
+ SLIC_INC_STATS_COUNTER(stats, tx_dropped);
+ if (!(isr & (SLIC_ISR_RMISS | SLIC_ISR_XDROP)))
+ SLIC_INC_STATS_COUNTER(stats, irq_errs);
+}
+
+static void slic_handle_irq(struct slic_device *sdev, u32 isr,
+ unsigned int todo, unsigned int *done)
+{
+ if (isr & SLIC_ISR_ERR)
+ slic_handle_err_irq(sdev, isr);
+
+ if (isr & SLIC_ISR_LEVENT)
+ slic_handle_link_change(sdev);
+
+ if (isr & SLIC_ISR_UPC_MASK)
+ slic_handle_upr_irq(sdev, isr);
+
+ if (isr & SLIC_ISR_RCV)
+ slic_handle_receive(sdev, todo, done);
+
+ if (isr & SLIC_ISR_CMD)
+ slic_xmit_complete(sdev);
+}
+
+static int slic_poll(struct napi_struct *napi, int todo)
+{
+ struct slic_device *sdev = container_of(napi, struct slic_device, napi);
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ u32 isr = le32_to_cpu(sm_data->isr);
+ int done = 0;
+
+ slic_handle_irq(sdev, isr, todo, &done);
+
+ if (done < todo) {
+ napi_complete_done(napi, done);
+ /* reenable irqs */
+ sm_data->isr = 0;
+		/* make sure sm_data->isr is cleared before irqs are re-enabled */
+ wmb();
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+ }
+
+ return done;
+}
+
+static irqreturn_t slic_irq(int irq, void *dev_id)
+{
+ struct slic_device *sdev = dev_id;
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_MASK);
+ slic_flush_write(sdev);
+ /* make sure sm_data->isr is read after ICR_INT_MASK is set */
+ wmb();
+
+ if (!sm_data->isr) {
+ dma_rmb();
+ /* spurious interrupt */
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+ return IRQ_NONE;
+ }
+
+ napi_schedule_irqoff(&sdev->napi);
+
+ return IRQ_HANDLED;
+}
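+
+/* Interrupt flow in a nutshell: the card DMAs its interrupt status
+ * into slic_shmem_data.isr, slic_irq() masks further interrupts via
+ * ICR and schedules NAPI, and slic_poll() handles the events before
+ * clearing the shared status word and re-enabling interrupts through
+ * the ISR register.
+ */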
+
+static void slic_card_reset(struct slic_device *sdev)
+{
+ u16 cmd;
+
+ slic_write(sdev, SLIC_REG_RESET, SLIC_RESET_MAGIC);
+ /* flush write by means of config space */
+ pci_read_config_word(sdev->pdev, PCI_COMMAND, &cmd);
+ mdelay(1);
+}
+
+static int slic_init_stat_queue(struct slic_device *sdev)
+{
+ const unsigned int DESC_ALIGN_MASK = SLIC_STATS_DESC_ALIGN - 1;
+ struct slic_stat_queue *stq = &sdev->stq;
+ struct slic_stat_desc *descs;
+ unsigned int misalign;
+ unsigned int offset;
+ dma_addr_t paddr;
+ size_t size;
+ int err;
+ int i;
+
+ stq->len = SLIC_NUM_STAT_DESCS;
+ stq->active_array = 0;
+ stq->done_idx = 0;
+
+ size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
+
+ for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
+ descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr,
+ GFP_KERNEL);
+ if (!descs) {
+ netdev_err(sdev->netdev,
+ "failed to allocate status descriptors\n");
+ err = -ENOMEM;
+ goto free_descs;
+ }
+ /* ensure correct alignment */
+ offset = 0;
+ misalign = paddr & DESC_ALIGN_MASK;
+ if (misalign) {
+ offset = SLIC_STATS_DESC_ALIGN - misalign;
+ descs += offset;
+ paddr += offset;
+ }
+
+ slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
+ stq->len);
+ stq->descs[i] = descs;
+ stq->paddr[i] = paddr;
+ stq->addr_offset[i] = offset;
+ }
+
+ stq->mem_size = size;
+
+ return 0;
+
+free_descs:
+ while (i--) {
+ dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
+ stq->descs[i] - stq->addr_offset[i],
+ stq->paddr[i] - stq->addr_offset[i]);
+ }
+
+ return err;
+}
+
+static void slic_free_stat_queue(struct slic_device *sdev)
+{
+ struct slic_stat_queue *stq = &sdev->stq;
+ int i;
+
+ for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
+ dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
+ stq->descs[i] - stq->addr_offset[i],
+ stq->paddr[i] - stq->addr_offset[i]);
+ }
+}
+
+static int slic_init_tx_queue(struct slic_device *sdev)
+{
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct slic_tx_buffer *buff;
+ struct slic_tx_desc *desc;
+ unsigned int i;
+ int err;
+
+ txq->len = SLIC_NUM_TX_DESCS;
+ txq->put_idx = 0;
+ txq->done_idx = 0;
+
+ txq->txbuffs = kcalloc(txq->len, sizeof(*buff), GFP_KERNEL);
+ if (!txq->txbuffs)
+ return -ENOMEM;
+
+ txq->dma_pool = dma_pool_create("slic_pool", &sdev->pdev->dev,
+ sizeof(*desc), SLIC_TX_DESC_ALIGN,
+ 4096);
+ if (!txq->dma_pool) {
+ err = -ENOMEM;
+ netdev_err(sdev->netdev, "failed to create dma pool\n");
+ goto free_buffs;
+ }
+
+ for (i = 0; i < txq->len; i++) {
+ buff = &txq->txbuffs[i];
+ desc = dma_pool_zalloc(txq->dma_pool, GFP_KERNEL,
+ &buff->desc_paddr);
+ if (!desc) {
+ netdev_err(sdev->netdev,
+ "failed to alloc pool chunk (%i)\n", i);
+ err = -ENOMEM;
+ goto free_descs;
+ }
+
+ desc->hnd = cpu_to_le32((u32)(i + 1));
+ desc->cmd = SLIC_CMD_XMT_REQ;
+ desc->flags = 0;
+ desc->type = cpu_to_le32(SLIC_CMD_TYPE_DUMB);
+ buff->desc = desc;
+ }
+
+ return 0;
+
+free_descs:
+ while (i--) {
+ buff = &txq->txbuffs[i];
+ dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
+ }
+ dma_pool_destroy(txq->dma_pool);
+
+free_buffs:
+ kfree(txq->txbuffs);
+
+ return err;
+}
+
+static void slic_free_tx_queue(struct slic_device *sdev)
+{
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct slic_tx_buffer *buff;
+ unsigned int i;
+
+ for (i = 0; i < txq->len; i++) {
+ buff = &txq->txbuffs[i];
+ dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
+ if (!buff->skb)
+ continue;
+
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
+ consume_skb(buff->skb);
+ }
+ dma_pool_destroy(txq->dma_pool);
+
+ kfree(txq->txbuffs);
+}
+
+static int slic_init_rx_queue(struct slic_device *sdev)
+{
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct slic_rx_buffer *buff;
+
+ rxq->len = SLIC_NUM_RX_LES;
+ rxq->done_idx = 0;
+ rxq->put_idx = 0;
+
+ buff = kcalloc(rxq->len, sizeof(*buff), GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ rxq->rxbuffs = buff;
+ slic_refill_rx_queue(sdev, GFP_KERNEL);
+
+ return 0;
+}
+
+static void slic_free_rx_queue(struct slic_device *sdev)
+{
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct slic_rx_buffer *buff;
+ unsigned int i;
+
+ /* free rx buffers */
+ for (i = 0; i < rxq->len; i++) {
+ buff = &rxq->rxbuffs[i];
+
+ if (!buff->skb)
+ continue;
+
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len),
+ DMA_FROM_DEVICE);
+ consume_skb(buff->skb);
+ }
+ kfree(rxq->rxbuffs);
+}
+
+static void slic_set_link_autoneg(struct slic_device *sdev)
+{
+ unsigned int subid = sdev->pdev->subsystem_device;
+ u32 val;
+
+ if (sdev->is_fiber) {
+ /* We've got a fiber gigabit interface, and register 4 is
+ * different in fiber mode than in copper mode.
+ */
+ /* advertise FD only @1000 Mb */
+ val = MII_ADVERTISE << 16 | ADVERTISE_1000XFULL |
+ ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+ /* enable PAUSE frames */
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ /* reset phy, enable auto-neg */
+ val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ } else { /* copper gigabit */
+ /* We've got a copper gigabit interface, and register 4 is
+ * different in copper mode than in fiber mode.
+ */
+ /* advertise 10/100 Mb modes */
+ val = MII_ADVERTISE << 16 | ADVERTISE_100FULL |
+ ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF;
+ /* enable PAUSE frames */
+ val |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ /* required by the Cicada PHY */
+ val |= ADVERTISE_CSMA;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+
+ /* advertise FD only @1000 Mb */
+ val = MII_CTRL1000 << 16 | ADVERTISE_1000FULL;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+
+			/* if a Marvell PHY, enable auto crossover */
+ /* if a Marvell PHY enable auto crossover */
+ val = SLIC_MIICR_REG_16 | SLIC_MRV_REG16_XOVERON;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+
+ /* reset phy, enable auto-neg */
+ val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ } else {
+ /* enable and restart auto-neg (don't reset) */
+ val = MII_BMCR << 16 | BMCR_ANENABLE | BMCR_ANRESTART;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ }
+ }
+}
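+
+/* All PHY accesses above go through the SLIC_REG_WPHY mailbox: the MII
+ * register number is placed in the upper 16 bits of the written dword
+ * and the register value in the lower 16 bits.
+ */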
+
+static void slic_set_mac_address(struct slic_device *sdev)
+{
+ u8 *addr = sdev->netdev->dev_addr;
+ u32 val;
+
+ val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
+
+ slic_write(sdev, SLIC_REG_WRADDRAL, val);
+ slic_write(sdev, SLIC_REG_WRADDRBL, val);
+
+ val = addr[0] << 8 | addr[1];
+
+ slic_write(sdev, SLIC_REG_WRADDRAH, val);
+ slic_write(sdev, SLIC_REG_WRADDRBH, val);
+ slic_flush_write(sdev);
+}
+
+static u32 slic_read_dword_from_firmware(const struct firmware *fw, int *offset)
+{
+ int idx = *offset;
+ __le32 val;
+
+ memcpy(&val, fw->data + *offset, sizeof(val));
+ idx += 4;
+ *offset = idx;
+
+ return le32_to_cpu(val);
+}
+
+MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_MOJAVE);
+MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_OASIS);
+
+static int slic_load_rcvseq_firmware(struct slic_device *sdev)
+{
+ const struct firmware *fw;
+ const char *file;
+ u32 codelen;
+ int idx = 0;
+ u32 instr;
+ u32 addr;
+ int err;
+
+ file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_RCV_FIRMWARE_OASIS :
+ SLIC_RCV_FIRMWARE_MOJAVE;
+ err = request_firmware(&fw, file, &sdev->pdev->dev);
+ if (err) {
+ dev_err(&sdev->pdev->dev,
+ "failed to load receive sequencer firmware %s\n", file);
+ return err;
+ }
+ /* Do an initial sanity check concerning firmware size now. A further
+ * check follows below.
+ */
+ if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
+ dev_err(&sdev->pdev->dev,
+ "invalid firmware size %zu (min %u expected)\n",
+ fw->size, SLIC_FIRMWARE_MIN_SIZE);
+ err = -EINVAL;
+ goto release;
+ }
+
+ codelen = slic_read_dword_from_firmware(fw, &idx);
+
+ /* do another sanity check against firmware size */
+ if ((codelen + 4) > fw->size) {
+ dev_err(&sdev->pdev->dev,
+ "invalid rcv-sequencer firmware size %zu\n", fw->size);
+ err = -EINVAL;
+ goto release;
+ }
+
+ /* download sequencer code to card */
+ slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
+ for (addr = 0; addr < codelen; addr++) {
+ __le32 val;
+ /* write out instruction address */
+ slic_write(sdev, SLIC_REG_RCV_WCS, addr);
+
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ /* write out the instruction data low addr */
+ slic_write(sdev, SLIC_REG_RCV_WCS, instr);
+
+ val = (__le32)fw->data[idx];
+ instr = le32_to_cpu(val);
+ idx++;
+ /* write out the instruction data high addr */
+ slic_write(sdev, SLIC_REG_RCV_WCS, instr);
+ }
+ /* finish download */
+ slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
+ slic_flush_write(sdev);
+release:
+ release_firmware(fw);
+
+ return err;
+}
+
+MODULE_FIRMWARE(SLIC_FIRMWARE_MOJAVE);
+MODULE_FIRMWARE(SLIC_FIRMWARE_OASIS);
+
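+/* Expected layout of the firmware image, as parsed below: a dword with
+ * the number of sections, one dword per section with its size in
+ * bytes, one dword per section with its start address, followed by the
+ * instruction stream itself (one low and one high dword, i.e. 8 bytes,
+ * per instruction).
+ */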
+static int slic_load_firmware(struct slic_device *sdev)
+{
+ u32 sectstart[SLIC_FIRMWARE_MAX_SECTIONS];
+ u32 sectsize[SLIC_FIRMWARE_MAX_SECTIONS];
+ const struct firmware *fw;
+ unsigned int datalen;
+ const char *file;
+ int code_start;
+ unsigned int i;
+ u32 numsects;
+ int idx = 0;
+ u32 sect;
+ u32 instr;
+ u32 addr;
+ u32 base;
+ int err;
+
+ file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_FIRMWARE_OASIS :
+ SLIC_FIRMWARE_MOJAVE;
+ err = request_firmware(&fw, file, &sdev->pdev->dev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to load firmware %s\n", file);
+ return err;
+ }
+ /* Do an initial sanity check concerning firmware size now. A further
+ * check follows below.
+ */
+ if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
+ dev_err(&sdev->pdev->dev,
+ "invalid firmware size %zu (min is %u)\n", fw->size,
+ SLIC_FIRMWARE_MIN_SIZE);
+ err = -EINVAL;
+ goto release;
+ }
+
+ numsects = slic_read_dword_from_firmware(fw, &idx);
+ if (numsects == 0 || numsects > SLIC_FIRMWARE_MAX_SECTIONS) {
+ dev_err(&sdev->pdev->dev,
+ "invalid number of sections in firmware: %u", numsects);
+ err = -EINVAL;
+ goto release;
+ }
+
+ datalen = numsects * 8 + 4;
+ for (i = 0; i < numsects; i++) {
+ sectsize[i] = slic_read_dword_from_firmware(fw, &idx);
+ datalen += sectsize[i];
+ }
+
+ /* do another sanity check against firmware size */
+ if (datalen > fw->size) {
+ dev_err(&sdev->pdev->dev,
+ "invalid firmware size %zu (expected >= %u)\n",
+ fw->size, datalen);
+ err = -EINVAL;
+ goto release;
+ }
+ /* get sections */
+ for (i = 0; i < numsects; i++)
+ sectstart[i] = slic_read_dword_from_firmware(fw, &idx);
+
+ code_start = idx;
+ instr = slic_read_dword_from_firmware(fw, &idx);
+
+ for (sect = 0; sect < numsects; sect++) {
+ unsigned int ssize = sectsize[sect] >> 3;
+
+ base = sectstart[sect];
+
+ for (addr = 0; addr < ssize; addr++) {
+ /* write out instruction address */
+ slic_write(sdev, SLIC_REG_WCS, base + addr);
+ /* write out instruction to low addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ /* write out instruction to high addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ }
+ }
+
+ idx = code_start;
+
+ for (sect = 0; sect < numsects; sect++) {
+ unsigned int ssize = sectsize[sect] >> 3;
+
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ base = sectstart[sect];
+ if (base < 0x8000)
+ continue;
+
+ for (addr = 0; addr < ssize; addr++) {
+ /* write out instruction address */
+ slic_write(sdev, SLIC_REG_WCS,
+ SLIC_WCS_COMPARE | (base + addr));
+ /* write out instruction to low addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ /* write out instruction to high addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ }
+ }
+ slic_flush_write(sdev);
+ mdelay(10);
+ /* everything OK, kick off the card */
+ slic_write(sdev, SLIC_REG_WCS, SLIC_WCS_START);
+ slic_flush_write(sdev);
+ /* wait long enough for ucode to init card and reach the mainloop */
+ mdelay(20);
+release:
+ release_firmware(fw);
+
+ return err;
+}
+
+static int slic_init_shmem(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data;
+ dma_addr_t paddr;
+
+ sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
+ &paddr, GFP_KERNEL);
+ if (!sm_data) {
+ dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
+ return -ENOMEM;
+ }
+
+ sm->shmem_data = sm_data;
+ sm->isr_paddr = paddr;
+ sm->link_paddr = paddr + offsetof(struct slic_shmem_data, link);
+
+ return 0;
+}
+
+static void slic_free_shmem(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+
+ dma_free_coherent(&sdev->pdev->dev, sizeof(*sm_data), sm_data,
+ sm->isr_paddr);
+}
+
+static int slic_init_iface(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ int err;
+
+ sdev->upr_list.pending = false;
+
+ err = slic_init_shmem(sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to init shared memory\n");
+ return err;
+ }
+
+ err = slic_load_firmware(sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to load firmware\n");
+ goto free_sm;
+ }
+
+ err = slic_load_rcvseq_firmware(sdev);
+ if (err) {
+ netdev_err(sdev->netdev,
+ "failed to load firmware for receive sequencer\n");
+ goto free_sm;
+ }
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
+ slic_flush_write(sdev);
+ mdelay(1);
+
+ err = slic_init_rx_queue(sdev);
+ if (err) {
+		netdev_err(sdev->netdev, "failed to init rx queue: %i\n", err);
+ goto free_sm;
+ }
+
+ err = slic_init_tx_queue(sdev);
+ if (err) {
+		netdev_err(sdev->netdev, "failed to init tx queue: %i\n", err);
+ goto free_rxq;
+ }
+
+ err = slic_init_stat_queue(sdev);
+ if (err) {
+		netdev_err(sdev->netdev, "failed to init status queue: %i\n",
+ err);
+ goto free_txq;
+ }
+
+ slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
+ napi_enable(&sdev->napi);
+ /* disable irq mitigation */
+ slic_write(sdev, SLIC_REG_INTAGG, 0);
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+
+ slic_set_mac_address(sdev);
+
+ spin_lock_bh(&sdev->link_lock);
+ sdev->duplex = DUPLEX_UNKNOWN;
+ sdev->speed = SPEED_UNKNOWN;
+ spin_unlock_bh(&sdev->link_lock);
+
+ slic_set_link_autoneg(sdev);
+
+ err = request_irq(sdev->pdev->irq, slic_irq, IRQF_SHARED, DRV_NAME,
+ sdev);
+ if (err) {
+		netdev_err(sdev->netdev, "failed to request irq: %i\n", err);
+ goto disable_napi;
+ }
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_ON);
+ slic_flush_write(sdev);
+ /* request initial link status */
+ err = slic_handle_link_change(sdev);
+ if (err)
+ netdev_warn(sdev->netdev,
+			    "failed to set initial link state: %i\n", err);
+ return 0;
+
+disable_napi:
+ napi_disable(&sdev->napi);
+ slic_free_stat_queue(sdev);
+free_txq:
+ slic_free_tx_queue(sdev);
+free_rxq:
+ slic_free_rx_queue(sdev);
+free_sm:
+ slic_free_shmem(sdev);
+ slic_card_reset(sdev);
+
+ return err;
+}
+
+static int slic_open(struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = slic_init_iface(sdev);
+ if (err) {
+ netdev_err(dev, "failed to initialize interface: %i\n", err);
+ return err;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int slic_close(struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ u32 val;
+
+ netif_stop_queue(dev);
+
+ /* stop irq handling */
+ napi_disable(&sdev->napi);
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+
+ free_irq(sdev->pdev->irq, sdev);
+ /* turn off RCV and XMT and power down PHY */
+ val = SLIC_GXCR_RESET | SLIC_GXCR_PAUSEEN;
+ slic_write(sdev, SLIC_REG_WXCFG, val);
+
+ val = SLIC_GRCR_RESET | SLIC_GRCR_CTLEN | SLIC_GRCR_ADDRAEN |
+ SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT;
+ slic_write(sdev, SLIC_REG_WRCFG, val);
+
+ val = MII_BMCR << 16 | BMCR_PDOWN;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ slic_flush_write(sdev);
+
+ slic_clear_upr_list(&sdev->upr_list);
+ slic_write(sdev, SLIC_REG_QUIESCE, 0);
+
+ slic_free_stat_queue(sdev);
+ slic_free_tx_queue(sdev);
+ slic_free_rx_queue(sdev);
+ slic_free_shmem(sdev);
+
+ slic_card_reset(sdev);
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct slic_tx_buffer *buff;
+ struct slic_tx_desc *desc;
+ dma_addr_t paddr;
+ u32 cbar_val;
+ u32 maplen;
+
+ if (unlikely(slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)) {
+ netdev_err(dev, "BUG! not enough tx LEs left: %u\n",
+ slic_get_free_tx_descs(txq));
+ return NETDEV_TX_BUSY;
+ }
+
+ maplen = skb_headlen(skb);
+ paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
+ netdev_err(dev, "failed to map tx buffer\n");
+ goto drop_skb;
+ }
+
+ buff = &txq->txbuffs[txq->put_idx];
+ buff->skb = skb;
+ dma_unmap_addr_set(buff, map_addr, paddr);
+ dma_unmap_len_set(buff, map_len, maplen);
+
+ desc = buff->desc;
+ desc->totlen = cpu_to_le32(maplen);
+ desc->paddrl = cpu_to_le32(lower_32_bits(paddr));
+ desc->paddrh = cpu_to_le32(upper_32_bits(paddr));
+ desc->len = cpu_to_le32(maplen);
+
+ txq->put_idx = slic_next_queue_idx(txq->put_idx, txq->len);
+
+ cbar_val = lower_32_bits(buff->desc_paddr) | 1;
+ /* complete writes to RAM and DMA before hardware is informed */
+ wmb();
+
+ slic_write(sdev, SLIC_REG_CBAR, cbar_val);
+
+ if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
+ netif_stop_queue(dev);
+	/* make sure writes to io-memory can't leak out of the tx queue lock */
+ mmiowb();
+
+ return NETDEV_TX_OK;
+drop_skb:
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
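
The transmit path above follows the standard descriptor-publish protocol:
map the buffer, fill in the descriptor, issue a write barrier, and only then
write the doorbell register, so the device can never observe the doorbell
before the descriptor contents. A hedged standalone illustration of the
ordering (the "doorbell" is an ordinary variable here, not real MMIO, and
__sync_synchronize() stands in for the kernel's wmb()):

    #include <stdint.h>
    #include <stdio.h>

    struct desc {
        uint64_t addr;
        uint32_t len;
        uint32_t flags;
    };

    static struct desc ring[8];
    static volatile uint32_t doorbell;  /* stand-in for an MMIO register */

    static void post_tx(unsigned int idx, uint64_t dma_addr, uint32_t len)
    {
        struct desc *d = &ring[idx];

        d->addr  = dma_addr;
        d->len   = len;
        d->flags = 1;                       /* mark the descriptor valid */

        /* descriptor writes must be visible before the doorbell write */
        __sync_synchronize();

        doorbell = (uint32_t)idx | 1;       /* kick the (imaginary) device */
    }

    int main(void)
    {
        post_tx(0, 0x1000, 64);
        printf("doorbell = 0x%x\n", doorbell);
        return 0;
    }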
+
+static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *lst)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct slic_stats *stats = &sdev->stats;
+
+ SLIC_GET_STATS_COUNTER(lst->rx_packets, stats, rx_packets);
+ SLIC_GET_STATS_COUNTER(lst->tx_packets, stats, tx_packets);
+ SLIC_GET_STATS_COUNTER(lst->rx_bytes, stats, rx_bytes);
+ SLIC_GET_STATS_COUNTER(lst->tx_bytes, stats, tx_bytes);
+ SLIC_GET_STATS_COUNTER(lst->rx_errors, stats, rx_errors);
+ SLIC_GET_STATS_COUNTER(lst->rx_dropped, stats, rx_buff_miss);
+ SLIC_GET_STATS_COUNTER(lst->tx_dropped, stats, tx_dropped);
+ SLIC_GET_STATS_COUNTER(lst->multicast, stats, rx_mcasts);
+ SLIC_GET_STATS_COUNTER(lst->rx_over_errors, stats, rx_buffoflow);
+ SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
+ SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
+ SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
+
+ return lst;
+}
+
+static int slic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(slic_stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void slic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *eth_stats, u64 *data)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct slic_stats *stats = &sdev->stats;
+
+ SLIC_GET_STATS_COUNTER(data[0], stats, rx_packets);
+ SLIC_GET_STATS_COUNTER(data[1], stats, rx_bytes);
+ SLIC_GET_STATS_COUNTER(data[2], stats, rx_mcasts);
+ SLIC_GET_STATS_COUNTER(data[3], stats, rx_errors);
+ SLIC_GET_STATS_COUNTER(data[4], stats, rx_buff_miss);
+ SLIC_GET_STATS_COUNTER(data[5], stats, rx_tpcsum);
+ SLIC_GET_STATS_COUNTER(data[6], stats, rx_tpoflow);
+ SLIC_GET_STATS_COUNTER(data[7], stats, rx_tphlen);
+ SLIC_GET_STATS_COUNTER(data[8], stats, rx_ipcsum);
+ SLIC_GET_STATS_COUNTER(data[9], stats, rx_iplen);
+ SLIC_GET_STATS_COUNTER(data[10], stats, rx_iphlen);
+ SLIC_GET_STATS_COUNTER(data[11], stats, rx_early);
+ SLIC_GET_STATS_COUNTER(data[12], stats, rx_buffoflow);
+ SLIC_GET_STATS_COUNTER(data[13], stats, rx_lcode);
+ SLIC_GET_STATS_COUNTER(data[14], stats, rx_drbl);
+ SLIC_GET_STATS_COUNTER(data[15], stats, rx_crc);
+ SLIC_GET_STATS_COUNTER(data[16], stats, rx_oflow802);
+ SLIC_GET_STATS_COUNTER(data[17], stats, rx_uflow802);
+ SLIC_GET_STATS_COUNTER(data[18], stats, tx_packets);
+ SLIC_GET_STATS_COUNTER(data[19], stats, tx_bytes);
+ SLIC_GET_STATS_COUNTER(data[20], stats, tx_carrier);
+ SLIC_GET_STATS_COUNTER(data[21], stats, tx_dropped);
+ SLIC_GET_STATS_COUNTER(data[22], stats, irq_errs);
+}
+
+static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS) {
+ memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
+ data += sizeof(slic_stats_strings);
+ }
+}
+
+static void slic_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops slic_ethtool_ops = {
+ .get_drvinfo = slic_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = slic_get_strings,
+ .get_ethtool_stats = slic_get_ethtool_stats,
+ .get_sset_count = slic_get_sset_count,
+};
+
+static const struct net_device_ops slic_netdev_ops = {
+ .ndo_open = slic_open,
+ .ndo_stop = slic_close,
+ .ndo_start_xmit = slic_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = slic_get_stats,
+ .ndo_set_rx_mode = slic_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static u16 slic_eeprom_csum(unsigned char *eeprom, unsigned int len)
+{
+ unsigned char *ptr = eeprom;
+ u32 csum = 0;
+ __le16 data;
+
+ while (len > 1) {
+ memcpy(&data, ptr, sizeof(data));
+ csum += le16_to_cpu(data);
+ ptr += 2;
+ len -= 2;
+ }
+ if (len > 0)
+ csum += *(u8 *)ptr;
+ while (csum >> 16)
+ csum = (csum & 0xFFFF) + ((csum >> 16) & 0xFFFF);
+ return ~csum;
+}
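
slic_eeprom_csum() is the classic ones' complement sum, the same family as
the RFC 1071 Internet checksum: add the buffer as 16-bit words, add any
trailing odd byte, fold the carries back into the low 16 bits, and invert.
A standalone host-order version for experimentation (the driver additionally
goes through le16_to_cpu, a no-op on little-endian hosts):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t csum16(const unsigned char *p, unsigned int len)
    {
        uint32_t sum = 0;
        uint16_t word;

        while (len > 1) {
            memcpy(&word, p, sizeof(word)); /* unaligned-safe 16-bit load */
            sum += word;
            p += 2;
            len -= 2;
        }
        if (len)
            sum += *p;                      /* trailing odd byte */
        while (sum >> 16)                   /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        unsigned char buf[] = { 0x01, 0x00, 0xff, 0xff, 0x02 };
        printf("csum = 0x%04x\n", csum16(buf, sizeof(buf)));
        return 0;
    }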
+
+/* check eeprom size, magic and checksum */
+static bool slic_eeprom_valid(unsigned char *eeprom, unsigned int size)
+{
+ const unsigned int MAX_SIZE = 128;
+ const unsigned int MIN_SIZE = 98;
+ __le16 magic;
+ __le16 csum;
+
+ if (size < MIN_SIZE || size > MAX_SIZE)
+ return false;
+ memcpy(&magic, eeprom, sizeof(magic));
+ if (le16_to_cpu(magic) != SLIC_EEPROM_MAGIC)
+ return false;
+ /* cut checksum bytes */
+ size -= 2;
+ memcpy(&csum, eeprom + size, sizeof(csum));
+
+ return (le16_to_cpu(csum) == slic_eeprom_csum(eeprom, size));
+}
+
+static int slic_read_eeprom(struct slic_device *sdev)
+{
+ unsigned int devfn = PCI_FUNC(sdev->pdev->devfn);
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ const unsigned int MAX_LOOPS = 5000;
+ unsigned int codesize;
+ unsigned char *eeprom;
+ struct slic_upr *upr;
+ unsigned int i = 0;
+ dma_addr_t paddr;
+ int err = 0;
+ u8 *mac[2];
+
+ eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
+ &paddr, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
+ /* setup ISP temporarily */
+ slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
+
+ err = slic_new_upr(sdev, SLIC_UPR_CONFIG, paddr);
+ if (!err) {
+ for (i = 0; i < MAX_LOOPS; i++) {
+ if (le32_to_cpu(sm_data->isr) & SLIC_ISR_UPC)
+ break;
+ mdelay(1);
+ }
+ if (i == MAX_LOOPS) {
+ dev_err(&sdev->pdev->dev,
+ "timed out while waiting for eeprom data\n");
+ err = -ETIMEDOUT;
+ }
+ upr = slic_dequeue_upr(sdev);
+ kfree(upr);
+ }
+
+ slic_write(sdev, SLIC_REG_ISP, 0);
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+
+ if (err)
+ goto free_eeprom;
+
+ if (sdev->model == SLIC_MODEL_OASIS) {
+ struct slic_oasis_eeprom *oee;
+
+ oee = (struct slic_oasis_eeprom *)eeprom;
+ mac[0] = oee->mac;
+ mac[1] = oee->mac2;
+ codesize = le16_to_cpu(oee->eeprom_code_size);
+ } else {
+ struct slic_mojave_eeprom *mee;
+
+ mee = (struct slic_mojave_eeprom *)eeprom;
+ mac[0] = mee->mac;
+ mac[1] = mee->mac2;
+ codesize = le16_to_cpu(mee->eeprom_code_size);
+ }
+
+ if (!slic_eeprom_valid(eeprom, codesize)) {
+ dev_err(&sdev->pdev->dev, "invalid checksum in eeprom\n");
+ err = -EINVAL;
+ goto free_eeprom;
+ }
+ /* set mac address */
+ ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
+free_eeprom:
+ dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);
+
+ return err;
+}
+
+static int slic_init(struct slic_device *sdev)
+{
+ int err;
+
+ spin_lock_init(&sdev->upper_lock);
+ spin_lock_init(&sdev->link_lock);
+ INIT_LIST_HEAD(&sdev->upr_list.list);
+ spin_lock_init(&sdev->upr_list.lock);
+ u64_stats_init(&sdev->stats.syncp);
+
+ slic_card_reset(sdev);
+
+ err = slic_load_firmware(sdev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to load firmware\n");
+ return err;
+ }
+
+	/* we need the shared memory to read the EEPROM, so set it up temporarily */
+ err = slic_init_shmem(sdev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
+ return err;
+ }
+
+ err = slic_read_eeprom(sdev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to read eeprom\n");
+ goto free_sm;
+ }
+
+ slic_card_reset(sdev);
+ slic_free_shmem(sdev);
+
+ return 0;
+free_sm:
+ slic_free_shmem(sdev);
+
+ return err;
+}
+
+static bool slic_is_fiber(unsigned short subdev)
+{
+ switch (subdev) {
+ /* Mojave */
+ case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: /* fallthrough */
+ /* Oasis */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: /* fallthrough */
+ return true;
+ }
+ return false;
+}
+
+static void slic_configure_pci(struct pci_dev *pdev)
+{
+ u16 old;
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &old);
+
+ cmd = old | PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ if (old != cmd)
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+}
+
+static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct slic_device *sdev;
+ struct net_device *dev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ slic_configure_pci(pdev);
+
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup DMA\n");
+ goto disable;
+ }
+
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "failed to obtain PCI regions\n");
+ goto disable;
+ }
+
+ dev = alloc_etherdev(sizeof(*sdev));
+ if (!dev) {
+ dev_err(&pdev->dev, "failed to alloc ethernet device\n");
+ err = -ENOMEM;
+ goto free_regions;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ pci_set_drvdata(pdev, dev);
+ dev->irq = pdev->irq;
+ dev->netdev_ops = &slic_netdev_ops;
+ dev->hw_features = NETIF_F_RXCSUM;
+ dev->features |= dev->hw_features;
+
+ dev->ethtool_ops = &slic_ethtool_ops;
+
+ sdev = netdev_priv(dev);
+ sdev->model = (pdev->device == PCI_DEVICE_ID_ALACRITECH_OASIS) ?
+ SLIC_MODEL_OASIS : SLIC_MODEL_MOJAVE;
+ sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
+ sdev->pdev = pdev;
+ sdev->netdev = dev;
+ sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!sdev->regs) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
+ err = slic_init(sdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize driver\n");
+ goto unmap;
+ }
+
+ netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device: %i\n", err);
+ goto unmap;
+ }
+
+ return 0;
+
+unmap:
+ iounmap(sdev->regs);
+free_netdev:
+ free_netdev(dev);
+free_regions:
+ pci_release_regions(pdev);
+disable:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void slic_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct slic_device *sdev = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ iounmap(sdev->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver slic_driver = {
+ .name = DRV_NAME,
+ .id_table = slic_id_tbl,
+ .probe = slic_probe,
+ .remove = slic_remove,
+};
+
+static int __init slic_init_module(void)
+{
+ return pci_register_driver(&slic_driver);
+}
+
+static void __exit slic_cleanup_module(void)
+{
+ pci_unregister_driver(&slic_driver);
+}
+
+module_init(slic_init_module);
+module_exit(slic_cleanup_module);
+
+MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
+MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
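
A side note on the closing boilerplate: since the init and exit functions do
nothing beyond registering and unregistering the PCI driver, the same effect
could presumably be had with the module_pci_driver() convenience macro:

    module_pci_driver(slic_driver);

Spelling out module_init()/module_exit() does leave room for extra setup
work later, which may be why the driver keeps the long form.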
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index bd8c80c0b71c..404c020cb82e 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -203,4 +203,14 @@ config BNXT_SRIOV
Virtualization support in the NetXtreme-C/E products. This
allows for virtual function acceleration in virtual environments.
+config BNXT_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on BNXT && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 85a7800bfc12..5f19427c7b27 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1872,8 +1872,16 @@ static void bnx2x_get_ringparam(struct net_device *dev,
ering->rx_max_pending = MAX_RX_AVAIL;
+	/* If the size isn't already set, give an estimate of the number
+	 * of buffers we'll have. This neglects some corner cases [at this
+	 * point we can't know for certain whether the number of queues
+	 * might still shrink], but the number is correct for the likely
+	 * scenario.
+	 */
if (bp->rx_ring_size)
ering->rx_pending = bp->rx_ring_size;
+ else if (BNX2X_NUM_RX_QUEUES(bp))
+ ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
else
ering->rx_pending = MAX_RX_AVAIL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ab990da677d5..688617ac8c29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10138,7 +10138,7 @@ static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
{
struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
- if (!netif_running(bp->dev) || !IS_PF(bp))
+ if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
return;
if (udp_port->count && udp_port->dst_port == port) {
@@ -10163,7 +10163,7 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
{
struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
- if (!IS_PF(bp))
+ if (!IS_PF(bp) || CHIP_IS_E1x(bp))
return;
if (!udp_port->count || udp_port->dst_port != port) {
@@ -13508,6 +13508,7 @@ static int bnx2x_init_firmware(struct bnx2x *bp)
/* Initialize the pointers to the init arrays */
/* Blob */
+ rc = -ENOMEM;
BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
/* Opcodes */
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 97e78e217928..6082ed1b5ea0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0e4f168bea9e..9608cb49a11c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -52,8 +52,10 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
+#include "bnxt_dcb.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -186,11 +188,11 @@ static const u16 bnxt_vf_req_snif[] = {
};
static const u16 bnxt_async_events_arr[] = {
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+ ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
static bool bnxt_vf_pciid(enum board_idx idx)
@@ -1476,8 +1478,8 @@ next_rx_no_prod:
}
#define BNXT_GET_EVENT_PORT(data) \
- ((data) & \
- HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+ ((data) & \
+ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
@@ -1486,7 +1488,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
u32 data1 = le32_to_cpu(cmpl->event_data1);
struct bnxt_link_info *link_info = &bp->link_info;
@@ -1502,13 +1504,13 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
}
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+ case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
u32 data1 = le32_to_cpu(cmpl->event_data1);
u16 port_id = BNXT_GET_EVENT_PORT(data1);
@@ -1521,18 +1523,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
break;
}
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
if (BNXT_PF(bp))
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
default:
- netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
- event_id);
goto async_event_process_exit;
}
schedule_work(&bp->sp_task);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -3116,27 +3117,46 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
return rc;
}
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+ int bmap_size)
{
struct hwrm_func_drv_rgtr_input req = {0};
- int i;
DECLARE_BITMAP(async_events_bmap, 256);
u32 *events = (u32 *)async_events_bmap;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
req.enables =
- cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
- FUNC_DRV_RGTR_REQ_ENABLES_VER |
- FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
memset(async_events_bmap, 0, sizeof(async_events_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
__set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ if (bmap && bmap_size) {
+ for (i = 0; i < bmap_size; i++) {
+ if (test_bit(i, bmap))
+ __set_bit(i, async_events_bmap);
+ }
+ }
+
for (i = 0; i < 8; i++)
req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
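
The registration helper above packs a 256-bit event bitmap into the eight
32-bit words carried by the firmware request, viewing the bitmap storage as
an array of u32 and endian-converting each word. A standalone sketch of the
packing (the cast trick matches bit positions byte-for-byte on little-endian
hosts, which is what the driver relies on; event id 33 is made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* 256-bit bitmap, mirroring DECLARE_BITMAP(async_events_bmap, 256) */
        uint8_t bmap[256 / 8];
        uint32_t words[8];
        unsigned int id = 33;               /* hypothetical async event id */

        memset(bmap, 0, sizeof(bmap));
        bmap[id / 8] |= 1u << (id % 8);     /* __set_bit() equivalent on LE */

        /* serialize into the 8 x 32-bit request words */
        memcpy(words, bmap, sizeof(words));
        printf("word[%u] = 0x%08x\n", id / 32, words[id / 32]);
        return 0;
    }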
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_rgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER);
+
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
req.ver_maj = DRV_VER_MAJ;
req.ver_min = DRV_VER_MIN;
@@ -3145,6 +3165,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
if (BNXT_PF(bp)) {
DECLARE_BITMAP(vf_req_snif_bmap, 256);
u32 *data = (u32 *)vf_req_snif_bmap;
+ int i;
memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
@@ -3526,7 +3547,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
return rc;
}
-static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
unsigned int ring = 0, grp_idx;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -3574,6 +3595,9 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ req.flags |=
+ cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
@@ -4115,7 +4139,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- return 0;
+ return rc;
}
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
@@ -4151,7 +4175,7 @@ func_qcfg_exit:
return rc;
}
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
@@ -4165,6 +4189,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
+ if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+ bp->flags |= BNXT_FLAG_ROCEV1_CAP;
+ if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+ bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+
bp->tx_push_thresh = 0;
if (resp->flags &
cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
@@ -4261,12 +4290,16 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
goto qportcfg_exit;
}
bp->max_tc = resp->max_configurable_queues;
+ bp->max_lltc = resp->max_configurable_lossless_queues;
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
bp->max_tc = 1;
+ if (bp->max_lltc > bp->max_tc)
+ bp->max_lltc = bp->max_tc;
+
qptr = &resp->queue_id0;
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
@@ -4738,16 +4771,134 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
return 0;
}
-static int bnxt_setup_msix(struct bnxt *bp)
+static void bnxt_setup_msix(struct bnxt *bp)
{
- struct msix_entry *msix_ent;
+ const int len = sizeof(bp->irq_tbl[0].name);
struct net_device *dev = bp->dev;
- int i, total_vecs, rc = 0, min = 1;
+ int tcs, i;
+
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1) {
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+ if (bp->tx_nr_rings_per_tc == 0) {
+ netdev_reset_tc(dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ } else {
+ int i, off, count;
+
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
+ }
+ }
+ }
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ char *attr;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ attr = "TxRx";
+ else if (i < bp->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
+ i);
+ bp->irq_tbl[i].handler = bnxt_msix;
+ }
+}
+
+static void bnxt_setup_inta(struct bnxt *bp)
+{
const int len = sizeof(bp->irq_tbl[0].name);
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
- total_vecs = bp->cp_nr_rings;
+ if (netdev_get_num_tc(bp->dev))
+ netdev_reset_tc(bp->dev);
+
+ snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
+ 0);
+ bp->irq_tbl[0].handler = bnxt_inta;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+ int rc;
+
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ bnxt_setup_msix(bp);
+ else
+ bnxt_setup_inta(bp);
+
+ rc = bnxt_set_real_num_queues(bp);
+ return rc;
+}
+
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_stat_ctxs;
+#endif
+ return bp->pf.max_stat_ctxs;
+}
+
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_stat_ctxs = max;
+ else
+#endif
+ bp->pf.max_stat_ctxs = max;
+}
+
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_cp_rings;
+#endif
+ return bp->pf.max_cp_rings;
+}
+
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_cp_rings = max;
+ else
+#endif
+ bp->pf.max_cp_rings = max;
+}
+
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_irqs;
+#endif
+ return bp->pf.max_irqs;
+}
+
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_irqs = max_irqs;
+ else
+#endif
+ bp->pf.max_irqs = max_irqs;
+}
+
+static int bnxt_init_msix(struct bnxt *bp)
+{
+ int i, total_vecs, rc = 0, min = 1;
+ struct msix_entry *msix_ent;
+ total_vecs = bnxt_get_max_func_irqs(bp);
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent)
return -ENOMEM;
@@ -4768,8 +4919,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
if (bp->irq_tbl) {
- int tcs;
+ for (i = 0; i < total_vecs; i++)
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
total_vecs, min == 1);
@@ -4777,43 +4930,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
goto msix_setup_exit;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- tcs = netdev_get_num_tc(dev);
- if (tcs > 1) {
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
- if (bp->tx_nr_rings_per_tc == 0) {
- netdev_reset_tc(dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- } else {
- int i, off, count;
-
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
- for (i = 0; i < tcs; i++) {
- count = bp->tx_nr_rings_per_tc;
- off = i * count;
- netdev_set_tc_queue(dev, i, count, off);
- }
- }
- }
- bp->cp_nr_rings = total_vecs;
-
- for (i = 0; i < bp->cp_nr_rings; i++) {
- char *attr;
+ bp->cp_nr_rings = (min == 1) ?
+ max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+ bp->tx_nr_rings + bp->rx_nr_rings;
- bp->irq_tbl[i].vector = msix_ent[i].vector;
- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
- attr = "TxRx";
- else if (i < bp->rx_nr_rings)
- attr = "rx";
- else
- attr = "tx";
-
- snprintf(bp->irq_tbl[i].name, len,
- "%s-%s-%d", dev->name, attr, i);
- bp->irq_tbl[i].handler = bnxt_msix;
- }
- rc = bnxt_set_real_num_queues(bp);
- if (rc)
- goto msix_setup_exit;
} else {
rc = -ENOMEM;
goto msix_setup_exit;
@@ -4823,52 +4943,54 @@ static int bnxt_setup_msix(struct bnxt *bp)
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
pci_disable_msix(bp->pdev);
kfree(msix_ent);
return rc;
}
-static int bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_inta(struct bnxt *bp)
{
- int rc;
- const int len = sizeof(bp->irq_tbl[0].name);
-
- if (netdev_get_num_tc(bp->dev))
- netdev_reset_tc(bp->dev);
-
bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl) {
- rc = -ENOMEM;
- return rc;
- }
+ if (!bp->irq_tbl)
+ return -ENOMEM;
+
+ bp->total_irqs = 1;
bp->rx_nr_rings = 1;
bp->tx_nr_rings = 1;
bp->cp_nr_rings = 1;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
bp->flags |= BNXT_FLAG_SHARED_RINGS;
bp->irq_tbl[0].vector = bp->pdev->irq;
- snprintf(bp->irq_tbl[0].name, len,
- "%s-%s-%d", bp->dev->name, "TxRx", 0);
- bp->irq_tbl[0].handler = bnxt_inta;
- rc = bnxt_set_real_num_queues(bp);
- return rc;
+ return 0;
}
-static int bnxt_setup_int_mode(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
int rc = 0;
if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_setup_msix(bp);
+ rc = bnxt_init_msix(bp);
if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
/* fallback to INTA */
- rc = bnxt_setup_inta(bp);
+ rc = bnxt_init_inta(bp);
}
return rc;
}
+static void bnxt_clear_int_mode(struct bnxt *bp)
+{
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -4887,10 +5009,6 @@ static void bnxt_free_irq(struct bnxt *bp)
free_irq(irq->vector, bp->bnapi[i]);
irq->requested = 0;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
- kfree(bp->irq_tbl);
- bp->irq_tbl = NULL;
}
static int bnxt_request_irq(struct bnxt *bp)
@@ -4993,7 +5111,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
}
}
-static void bnxt_tx_disable(struct bnxt *bp)
+void bnxt_tx_disable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
@@ -5011,7 +5129,7 @@ static void bnxt_tx_disable(struct bnxt *bp)
netif_carrier_off(bp->dev);
}
-static void bnxt_tx_enable(struct bnxt *bp)
+void bnxt_tx_enable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
@@ -5561,22 +5679,7 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
- if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
- rc);
- rc = -EBUSY;
- return rc;
- }
- /* Do func_reset during the 1st PF open only to prevent killing
- * the VFs when the PF is brought down and up.
- */
- if (BNXT_PF(bp))
- set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
- }
return __bnxt_open_nic(bp, true, true);
}
@@ -6337,17 +6440,10 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
- u8 tc;
-
- if (ntc->type != TC_SETUP_MQPRIO)
- return -EINVAL;
-
- tc = ntc->tc;
if (tc > bp->max_tc) {
netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
@@ -6390,6 +6486,15 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return 0;
}
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *ntc)
+{
+ if (ntc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return bnxt_setup_mq_tc(dev, ntc->tc);
+}
+
#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
@@ -6678,11 +6783,15 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
+ bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_dcb_free(bp);
pci_iounmap(pdev, bp->bar2);
pci_iounmap(pdev, bp->bar1);
pci_iounmap(pdev, bp->bar0);
+ kfree(bp->edev);
+ bp->edev = NULL;
free_netdev(dev);
pci_release_regions(pdev);
@@ -6791,6 +6900,39 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}
+static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+ if (rc)
+ return rc;
+
+ if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnxt_get_max_func_cp_rings(bp);
+ max_stat = bnxt_get_max_func_stat_ctxs(bp);
+ max_irq = bnxt_get_max_func_irqs(bp);
+ if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNXT_MIN_ROCE_CP_RINGS;
+ max_irq -= BNXT_MIN_ROCE_CP_RINGS;
+ max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(int, max_cp, max_irq);
+ max_cp = min_t(int, max_cp, max_stat);
+ rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+ return rc;
+}
+
static int bnxt_set_dflt_rings(struct bnxt *bp)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -6799,7 +6941,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
- rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
+ rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
@@ -6815,6 +6957,13 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
}
+void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+{
+ ASSERT_RTNL();
+ bnxt_hwrm_func_qcaps(bp);
+ bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+}
+
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -6907,6 +7056,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = 9500;
+ bnxt_dcb_init(bp);
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
@@ -6918,6 +7069,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
+ rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ if (rc)
+ goto init_err;
+
+ bp->ulp_probe = bnxt_ulp_probe;
+
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
@@ -6939,12 +7096,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
- if (BNXT_PF(bp))
- bp->pf.max_irqs = max_irqs;
-#if defined(CONFIG_BNXT_SRIOV)
- else
- bp->vf.max_irqs = max_irqs;
-#endif
+ bnxt_set_max_func_irqs(bp, max_irqs);
bnxt_set_dflt_rings(bp);
/* Default RSS hash cfg. */
@@ -6975,10 +7127,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
- rc = register_netdev(dev);
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ goto init_err;
+
+ rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err;
+ rc = register_netdev(dev);
+ if (rc)
+ goto init_err_clr_int;
+
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
@@ -6987,6 +7147,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+init_err_clr_int:
+ bnxt_clear_int_mode(bp);
+
init_err:
pci_iounmap(pdev, bp->bar0);
pci_release_regions(pdev);
@@ -7016,6 +7179,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
netif_device_detach(netdev);
+ bnxt_ulp_stop(bp);
+
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
@@ -7024,8 +7189,6 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
- /* So that func_reset will be done during slot_reset */
- clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
pci_disable_device(pdev);
rtnl_unlock();
@@ -7059,11 +7222,14 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
- if (netif_running(netdev))
+ err = bnxt_hwrm_func_reset(bp);
+ if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- if (!err)
+ if (!err) {
result = PCI_ERS_RESULT_RECOVERED;
+ bnxt_ulp_start(bp);
+ }
}
if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 47be7894c67b..16defe9ececc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.5.0"
+#define DRV_MODULE_VERSION "1.6.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 5
+#define DRV_VER_MIN 6
#define DRV_VER_UPD 0
struct tx_bd {
@@ -387,6 +387,9 @@ struct rx_tpa_end_cmp_ext {
#define DB_KEY_TX_PUSH (0x4 << 28)
#define DB_LONG_TX_PUSH (0x2 << 24)
+#define BNXT_MIN_ROCE_CP_RINGS 2
+#define BNXT_MIN_ROCE_STAT_CTXS 1
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -953,6 +956,10 @@ struct bnxt {
#define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
+ #define BNXT_FLAG_ROCEV1_CAP 0x8000
+ #define BNXT_FLAG_ROCEV2_CAP 0x10000
+ #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
+ BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -965,6 +972,9 @@ struct bnxt {
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+ struct bnxt_en_dev *edev;
+ struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
+
struct bnxt_napi **bnapi;
struct bnxt_rx_ring_info *rx_ring;
@@ -1010,6 +1020,7 @@ struct bnxt {
u32 rss_hash_cfg;
u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
unsigned int current_interval;
@@ -1020,11 +1031,18 @@ struct bnxt {
unsigned long state;
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
-#define BNXT_STATE_FN_RST_DONE 2
struct bnxt_irq *irq_tbl;
+ int total_irqs;
u8 mac_addr[ETH_ALEN];
+#ifdef CONFIG_BNXT_DCB
+ struct ieee_pfc *ieee_pfc;
+ struct ieee_ets *ieee_ets;
+ u8 dcbx_cap;
+ u8 default_pri;
+#endif /* CONFIG_BNXT_DCB */
+
u32 msg_enable;
u32 hwrm_spec_code;
@@ -1116,6 +1134,13 @@ struct bnxt {
u32 lpi_tmr_hi;
};
+#define BNXT_RX_STATS_OFFSET(counter) \
+ (offsetof(struct rx_port_stats, counter) / 8)
+
+#define BNXT_TX_STATS_OFFSET(counter) \
+ ((offsetof(struct tx_port_stats, counter) + \
+ sizeof(struct rx_port_stats) + 512) / 8)
+
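
These two offset macros move here from bnxt_ethtool.c (they are deleted
there later in this patch) so the new DCB code can share them. They index a
flat buffer of 64-bit counters: the RX port-stats block comes first, the TX
block follows after a 512-byte gap, and dividing the byte offset by 8 turns
it into a counter index. A toy version of the arithmetic (struct sizes are
illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* toy layout: RX block, 512-byte gap, then TX block */
    struct rx_port_stats { uint64_t ctr[100]; };
    struct tx_port_stats { uint64_t ctr[80]; };

    #define TX_STATS_INDEX(i) \
        ((sizeof(struct rx_port_stats) + 512) / 8 + (i))

    int main(void)
    {
        /* index of TX counter 0 within one flat array of 64-bit counters */
        printf("tx counter 0 at index %zu\n", TX_STATS_INDEX(0));
        return 0;
    }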
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
{
@@ -1218,12 +1243,23 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+ int bmap_size);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int bnxt_hwrm_set_coal(struct bnxt *);
-int bnxt_hwrm_func_qcaps(struct bnxt *);
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
+void bnxt_tx_disable(struct bnxt *bp);
+void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
+void bnxt_restore_pf_fw_resources(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
new file mode 100644
index 000000000000..fdf2d8caf7bf
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -0,0 +1,502 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_dcb.h"
+
+#ifdef CONFIG_BNXT_DCB
+static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+ struct hwrm_queue_pri2cos_cfg_input req = {0};
+ int rc = 0, i;
+ u8 *pri2cos;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
+ req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
+ QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
+
+ pri2cos = &req.pri0_cos_queue_id;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ req.enables |= cpu_to_le32(
+ QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
+
+ pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
+
+static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+ struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_pri2cos_qcfg_input req = {0};
+ int rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
+ req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ u8 *pri2cos = &resp->pri0_cos_queue_id;
+ int i, j;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 queue_id = pri2cos[i];
+
+ for (j = 0; j < bp->max_tc; j++) {
+ if (bp->q_info[j].queue_id == queue_id) {
+ ets->prio_tc[i] = j;
+ break;
+ }
+ }
+ }
+ }
+ return rc;
+}
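
The pri2cos pair above programs and reads back a two-level mapping: IEEE
priority to traffic class (ets->prio_tc), then traffic class to hardware CoS
queue id (bp->q_info). A tiny standalone walk of such a mapping, with
made-up table contents:

    #include <stdio.h>

    #define MAX_TCS 8

    int main(void)
    {
        /* hypothetical tables: priority -> TC, TC -> hardware queue id */
        int prio_tc[MAX_TCS]  = { 0, 0, 1, 1, 2, 2, 3, 3 };
        int tc_queue[MAX_TCS] = { 4, 5, 6, 7, 0, 1, 2, 3 };
        int pri;

        for (pri = 0; pri < MAX_TCS; pri++)
            printf("pri %d -> TC %d -> queue %d\n",
                   pri, prio_tc[pri], tc_queue[prio_tc[pri]]);
        return 0;
    }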
+
+static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
+ u8 max_tc)
+{
+ struct hwrm_queue_cos2bw_cfg_input req = {0};
+ struct bnxt_cos2bw_cfg cos2bw;
+ int rc = 0, i;
+ void *data;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
+ data = &req.unused_0;
+ for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
+ req.enables |= cpu_to_le32(
+ QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+
+ memset(&cos2bw, 0, sizeof(cos2bw));
+ cos2bw.queue_id = bp->q_info[i].queue_id;
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
+ cos2bw.tsa =
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
+ cos2bw.pri_lvl = i;
+ } else {
+ cos2bw.tsa =
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
+ cos2bw.bw_weight = ets->tc_tx_bw[i];
+ }
+ memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
+ if (i == 0) {
+ req.queue_id0 = cos2bw.queue_id;
+ req.unused_0 = 0;
+ }
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
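
The config helper above serializes one 12-byte record per traffic class,
copying from queue_id onward so that the three leading pad bytes and the
trailing unused byte of struct bnxt_cos2bw_cfg are skipped; hence the
recurring sizeof(cos2bw) - 4. A standalone check of that layout arithmetic:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cos2bw {      /* mirrors struct bnxt_cos2bw_cfg */
        uint8_t  pad[3];
        uint8_t  queue_id;
        uint32_t min_bw;
        uint32_t max_bw;
        uint8_t  tsa;
        uint8_t  pri_lvl;
        uint8_t  bw_weight;
        uint8_t  unused;
    };

    int main(void)
    {
        /* 12 bytes on the wire: queue_id through bw_weight */
        printf("record size = %zu\n", sizeof(struct cos2bw) - 4);
        printf("copy starts at offset %zu\n",
               offsetof(struct cos2bw, queue_id));
        return 0;
    }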
+
+static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+ struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_cos2bw_qcfg_input req = {0};
+ struct bnxt_cos2bw_cfg cos2bw;
+ void *data;
+ int rc, i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
+ for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+ int j;
+
+ memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+ if (i == 0)
+ cos2bw.queue_id = resp->queue_id0;
+
+ for (j = 0; j < bp->max_tc; j++) {
+ if (bp->q_info[j].queue_id != cos2bw.queue_id)
+ continue;
+ if (cos2bw.tsa ==
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
+ ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
+ } else {
+ ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
+ ets->tc_tx_bw[j] = cos2bw.bw_weight;
+ }
+ }
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
+{
+ struct hwrm_queue_cfg_input req = {0};
+ int i;
+
+ if (netif_running(bp->dev))
+ bnxt_tx_disable(bp);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
+ req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
+ req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
+
+ /* Configure lossless queues to lossy first */
+ req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+ for (i = 0; i < bp->max_tc; i++) {
+ if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
+ req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+ hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ bp->q_info[i].queue_profile =
+ QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+ }
+ }
+
+ /* Now configure desired queues to lossless */
+ req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+ for (i = 0; i < bp->max_tc; i++) {
+ if (lltc_mask & (1 << i)) {
+ req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+ hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ bp->q_info[i].queue_profile =
+ QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+ }
+ }
+ if (netif_running(bp->dev))
+ bnxt_tx_enable(bp);
+
+ return 0;
+}
+
+static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
+{
+ struct hwrm_queue_pfcenable_cfg_input req = {0};
+ struct ieee_ets *my_ets = bp->ieee_ets;
+ unsigned int tc_mask = 0, pri_mask = 0;
+ u8 i, pri, lltc_count = 0;
+ bool need_q_recfg = false;
+ int rc;
+
+ if (!my_ets)
+ return -EINVAL;
+
+ for (i = 0; i < bp->max_tc; i++) {
+ for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
+ if ((pfc->pfc_en & (1 << pri)) &&
+ (my_ets->prio_tc[pri] == i)) {
+ pri_mask |= 1 << pri;
+ tc_mask |= 1 << i;
+ }
+ }
+ if (tc_mask & (1 << i))
+ lltc_count++;
+ }
+ if (lltc_count > bp->max_lltc)
+ return -EINVAL;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
+ req.flags = cpu_to_le32(pri_mask);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < bp->max_tc; i++) {
+ if (tc_mask & (1 << i)) {
+ if (!BNXT_LLQ(bp->q_info[i].queue_profile))
+ need_q_recfg = true;
+ }
+ }
+
+ if (need_q_recfg)
+ rc = bnxt_hwrm_queue_cfg(bp, tc_mask);
+
+ return rc;
+}
+
+static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
+{
+ struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_pfcenable_qcfg_input req = {0};
+ u8 pri_mask;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ pri_mask = le32_to_cpu(resp->flags);
+ pfc->pfc_en = pri_mask;
+ return 0;
+}
+
+static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
+{
+ int total_ets_bw = 0;
+ u8 max_tc = 0;
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->prio_tc[i] > bp->max_tc) {
+ netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
+ ets->prio_tc[i]);
+ return -EINVAL;
+ }
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
+ return -EINVAL;
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ total_ets_bw += ets->tc_tx_bw[i];
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+ if (total_ets_bw > 100)
+ return -EINVAL;
+
+ *tc = max_tc + 1;
+ return 0;
+}
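
bnxt_ets_validate() enforces, among other checks, that the bandwidth shares
of all ETS-scheduled classes sum to at most 100 percent; strict-priority
classes contribute nothing to the total. A worked miniature with
hypothetical shares:

    #include <stdio.h>

    enum { TSA_STRICT, TSA_ETS };   /* illustrative, not the IEEE encoding */

    int main(void)
    {
        int tsa[4]   = { TSA_STRICT, TSA_ETS, TSA_ETS, TSA_ETS };
        int tx_bw[4] = { 0, 50, 30, 20 };
        int i, total = 0;

        for (i = 0; i < 4; i++)
            if (tsa[i] == TSA_ETS)
                total += tx_bw[i];

        /* like the driver: reject if ETS shares exceed 100% */
        printf("total ETS bandwidth = %d%% (%s)\n",
               total, total > 100 ? "reject" : "ok");
        return 0;
    }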
+
+static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ieee_ets *my_ets = bp->ieee_ets;
+
+ ets->ets_cap = bp->max_tc;
+
+ if (!my_ets) {
+ int rc;
+
+ if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return 0;
+
+ my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+ if (!my_ets)
+ return 0;
+ rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
+ if (rc)
+ return 0;
+ rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
+ if (rc)
+ return 0;
+ }
+
+ ets->cbs = my_ets->cbs;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ return 0;
+}
+
+static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ieee_ets *my_ets = bp->ieee_ets;
+ u8 max_tc = 0;
+ int rc, i;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ rc = bnxt_ets_validate(bp, ets, &max_tc);
+ if (!rc) {
+ if (!my_ets) {
+ my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+ if (!my_ets)
+ return -ENOMEM;
+ /* initialize PRI2TC mappings to invalid value */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
+ bp->ieee_ets = my_ets;
+ }
+ rc = bnxt_setup_mq_tc(dev, max_tc);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
+ if (rc)
+ return rc;
+ memcpy(my_ets, ets, sizeof(*my_ets));
+ }
+ return rc;
+}
+
+static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
+ struct ieee_pfc *my_pfc = bp->ieee_pfc;
+ long rx_off, tx_off;
+ int i, rc;
+
+ pfc->pfc_cap = bp->max_lltc;
+
+ if (!my_pfc) {
+ if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return 0;
+
+ my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
+ if (!my_pfc)
+ return 0;
+ bp->ieee_pfc = my_pfc;
+ rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
+ if (rc)
+ return 0;
+ }
+
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->mbc = my_pfc->mbc;
+ pfc->delay = my_pfc->delay;
+
+ if (!stats)
+ return 0;
+
+ rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
+ tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
+ pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
+ pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
+ }
+
+ return 0;
+}
+
+static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ieee_pfc *my_pfc = bp->ieee_pfc;
+ int rc;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ if (!my_pfc) {
+ my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
+ if (!my_pfc)
+ return -ENOMEM;
+ bp->ieee_pfc = my_pfc;
+ }
+ rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
+ if (!rc)
+ memcpy(my_pfc, pfc, sizeof(*my_pfc));
+
+ return rc;
+}
+
+static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = -EINVAL;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ rc = dcb_ieee_setapp(dev, app);
+ return rc;
+}
+
+static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ rc = dcb_ieee_delapp(dev, app);
+ return rc;
+}
+
+static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->dcbx_cap;
+}
+
+static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* only support IEEE */
+ if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
+ return 1;
+
+ if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp))
+ return 1;
+
+ if (mode == bp->dcbx_cap)
+ return 0;
+
+ bp->dcbx_cap = mode;
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = bnxt_dcbnl_ieee_getets,
+ .ieee_setets = bnxt_dcbnl_ieee_setets,
+ .ieee_getpfc = bnxt_dcbnl_ieee_getpfc,
+ .ieee_setpfc = bnxt_dcbnl_ieee_setpfc,
+ .ieee_setapp = bnxt_dcbnl_ieee_setapp,
+ .ieee_delapp = bnxt_dcbnl_ieee_delapp,
+ .getdcbx = bnxt_dcbnl_getdcbx,
+ .setdcbx = bnxt_dcbnl_setdcbx,
+};
+
+void bnxt_dcb_init(struct bnxt *bp)
+{
+ if (bp->hwrm_spec_code < 0x10501)
+ return;
+
+ bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
+ if (BNXT_PF(bp))
+ bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
+ else
+ bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
+ bp->dev->dcbnl_ops = &dcbnl_ops;
+}
+
+void bnxt_dcb_free(struct bnxt *bp)
+{
+ kfree(bp->ieee_pfc);
+ kfree(bp->ieee_ets);
+ bp->ieee_pfc = NULL;
+ bp->ieee_ets = NULL;
+}
+
+#else
+
+void bnxt_dcb_init(struct bnxt *bp)
+{
+}
+
+void bnxt_dcb_free(struct bnxt *bp)
+{
+}
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
new file mode 100644
index 000000000000..35a0d28cf2fd
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -0,0 +1,41 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_DCB_H
+#define BNXT_DCB_H
+
+#include <net/dcbnl.h>
+
+struct bnxt_dcb {
+ u8 max_tc;
+ struct ieee_pfc *ieee_pfc;
+ struct ieee_ets *ieee_ets;
+ u8 dcbx_cap;
+ u8 default_pri;
+};
+
+struct bnxt_cos2bw_cfg {
+ u8 pad[3];
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ u8 unused;
+};
+
+#define BNXT_LLQ(q_profile) \
+ ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
+
+#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
+
+void bnxt_dcb_init(struct bnxt *bp);
+void bnxt_dcb_free(struct bnxt *bp);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index fa6125eb24af..784aa77610bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -107,16 +107,9 @@ static int bnxt_set_coalesce(struct net_device *dev,
#define BNXT_NUM_STATS 21
-#define BNXT_RX_STATS_OFFSET(counter) \
- (offsetof(struct rx_port_stats, counter) / 8)
-
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
-#define BNXT_TX_STATS_OFFSET(counter) \
- ((offsetof(struct tx_port_stats, counter) + \
- sizeof(struct rx_port_stats) + 512) / 8)
-
#define BNXT_TX_STATS_ENTRY(counter) \
{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
@@ -150,6 +143,14 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_tagged_frames),
BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
BNXT_RX_STATS_ENTRY(rx_good_frames),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
@@ -179,6 +180,14 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
BNXT_TX_STATS_ENTRY(tx_err),
BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
BNXT_TX_STATS_ENTRY(tx_total_collisions),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 0456d5b5d689..2ddfa51519a1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,29 +11,22 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* per-context HW statistics -- chip view */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
+/* HSI and HWRM Specification 1.6.0 */
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 6
+#define HWRM_VERSION_UPDATE 0
+
+#define HWRM_VERSION_STR "1.6.0"
+/*
+ * The following is the signature for an HWRM message field that indicates
+ * not applicable (all F's). Cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7-bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
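
Since the comment above is terse: HWRM_NA_SIGNATURE is the all-ones value
that marks a request field as not applicable, truncated to the field's
width.  A hedged fragment showing the cast for a hypothetical 16-bit field
(the request struct and field name are made up for illustration):

/* All F's, cut down to the 16-bit width of the field. */
req.fid = cpu_to_le16((u16)le32_to_cpu(HWRM_NA_SIGNATURE));
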
/* Statistics Ejection Buffer Completion Record (16 bytes) */
struct eject_cmpl {
@@ -50,77 +44,77 @@ struct eject_cmpl {
/* HWRM Completion Record (16 bytes) */
struct hwrm_cmpl {
__le16 type;
- #define HWRM_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_CMPL_TYPE_SFT 0
- #define HWRM_CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
__le16 sequence_id;
__le32 unused_1;
__le32 v;
- #define HWRM_CMPL_V 0x1UL
+ #define CMPL_V 0x1UL
__le32 unused_3;
};
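
A reading aid for the renamed constants: throughout this header a field's
_MASK picks out its bits and _SFT gives its shift, so values are extracted
by masking and then shifting.  For example, testing that a completion
record is an HWRM_DONE completion, as a sketch against struct hwrm_cmpl
above:

static bool is_hwrm_done_cmpl(const struct hwrm_cmpl *cmpl)
{
        u16 type = le16_to_cpu(cmpl->type) & CMPL_TYPE_MASK;

        /* CMPL_TYPE_SFT happens to be 0; shifting keeps the idiom general. */
        return (type >> CMPL_TYPE_SFT) == CMPL_TYPE_HWRM_DONE;
}
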
/* HWRM Forwarded Request (16 bytes) */
struct hwrm_fwd_req_cmpl {
__le16 req_len_type;
- #define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
- #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
__le16 source_id;
__le32 unused_0;
__le32 req_buf_addr_v[2];
- #define HWRM_FWD_REQ_CMPL_V 0x1UL
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
};
/* HWRM Forwarded Response (16 bytes) */
struct hwrm_fwd_resp_cmpl {
__le16 type;
- #define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
- #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
__le16 source_id;
__le16 resp_len;
__le16 unused_1;
__le32 resp_buf_addr_v[2];
- #define HWRM_FWD_RESP_CMPL_V 0x1UL
- #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
};
/* HWRM Asynchronous Event Completion Record (16 bytes) */
struct hwrm_async_event_cmpl {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
@@ -129,686 +123,391 @@ struct hwrm_async_event_cmpl {
/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
struct hwrm_async_event_cmpl_link_status_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
};
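
To show how the event_data1 masks above decode in practice, a sketch (the
handler name and what the caller does with the results are hypothetical):

static void sketch_link_status_change(struct hwrm_async_event_cmpl_link_status_change *cmpl)
{
        u32 data1 = le32_to_cpu(cmpl->event_data1);
        bool link_up;
        u16 port_id;

        /* Bit 0: 0 = down, 1 = up. */
        link_up = !!(data1 & ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE);
        /* Bits 4..19: the port ID the event applies to. */
        port_id = (data1 & ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
                  ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;
        /* ... act on link_up / port_id ... */
}
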
/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
struct hwrm_async_event_cmpl_link_mtu_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
};
/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
struct hwrm_async_event_cmpl_link_speed_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
struct hwrm_async_event_cmpl_dcb_config_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
__le32 event_data2;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
};
/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
};
/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
struct hwrm_async_event_cmpl_link_speed_cfg_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
struct hwrm_async_event_cmpl_func_drvr_unload {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
struct hwrm_async_event_cmpl_func_drvr_load {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
+struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
};
/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
struct hwrm_async_event_cmpl_pf_drvr_load {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
};
/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
struct hwrm_async_event_cmpl_vf_flr {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
struct hwrm_async_event_cmpl_vf_mac_addr_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
};
/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
};
/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* HW Resource Manager Specification 1.5.4 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 5
-#define HWRM_VERSION_UPDATE 4
-
-#define HWRM_VERSION_STR "1.5.4"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-/* Input (16 bytes) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET (0x0UL)
- #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
- #define HWRM_FUNC_VF_CFG (0xfUL)
- #define RESERVED1 (0x10UL)
- #define HWRM_FUNC_RESET (0x11UL)
- #define HWRM_FUNC_GETFID (0x12UL)
- #define HWRM_FUNC_VF_ALLOC (0x13UL)
- #define HWRM_FUNC_VF_FREE (0x14UL)
- #define HWRM_FUNC_QCAPS (0x15UL)
- #define HWRM_FUNC_QCFG (0x16UL)
- #define HWRM_FUNC_CFG (0x17UL)
- #define HWRM_FUNC_QSTATS (0x18UL)
- #define HWRM_FUNC_CLR_STATS (0x19UL)
- #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
- #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
- #define HWRM_FUNC_DRV_RGTR (0x1dUL)
- #define HWRM_FUNC_DRV_QVER (0x1eUL)
- #define HWRM_FUNC_BUF_RGTR (0x1fUL)
- #define HWRM_PORT_PHY_CFG (0x20UL)
- #define HWRM_PORT_MAC_CFG (0x21UL)
- #define HWRM_PORT_TS_QUERY (0x22UL)
- #define HWRM_PORT_QSTATS (0x23UL)
- #define HWRM_PORT_LPBK_QSTATS (0x24UL)
- #define HWRM_PORT_CLR_STATS (0x25UL)
- #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
- #define HWRM_PORT_PHY_QCFG (0x27UL)
- #define HWRM_PORT_MAC_QCFG (0x28UL)
- #define HWRM_PORT_BLINK_LED (0x29UL)
- #define HWRM_PORT_PHY_QCAPS (0x2aUL)
- #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
- #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
- #define HWRM_QUEUE_QPORTCFG (0x30UL)
- #define HWRM_QUEUE_QCFG (0x31UL)
- #define HWRM_QUEUE_CFG (0x32UL)
- #define RESERVED2 (0x33UL)
- #define RESERVED3 (0x34UL)
- #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
- #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
- #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
- #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
- #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
- #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
- #define HWRM_VNIC_ALLOC (0x40UL)
- #define HWRM_VNIC_FREE (0x41UL)
- #define HWRM_VNIC_CFG (0x42UL)
- #define HWRM_VNIC_QCFG (0x43UL)
- #define HWRM_VNIC_TPA_CFG (0x44UL)
- #define HWRM_VNIC_TPA_QCFG (0x45UL)
- #define HWRM_VNIC_RSS_CFG (0x46UL)
- #define HWRM_VNIC_RSS_QCFG (0x47UL)
- #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
- #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
- #define HWRM_VNIC_QCAPS (0x4aUL)
- #define HWRM_RING_ALLOC (0x50UL)
- #define HWRM_RING_FREE (0x51UL)
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
- #define HWRM_RING_RESET (0x5eUL)
- #define HWRM_RING_GRP_ALLOC (0x60UL)
- #define HWRM_RING_GRP_FREE (0x61UL)
- #define RESERVED5 (0x64UL)
- #define RESERVED6 (0x65UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
- #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
- #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
- #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
- #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define RESERVED4 (0x94UL)
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
- #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
- #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
- #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
- #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
- #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
- #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
- #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
- #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
- #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
- #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
- #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
- #define HWRM_STAT_CTX_ALLOC (0xb0UL)
- #define HWRM_STAT_CTX_FREE (0xb1UL)
- #define HWRM_STAT_CTX_QUERY (0xb2UL)
- #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
- #define HWRM_FW_RESET (0xc0UL)
- #define HWRM_FW_QSTATUS (0xc1UL)
- #define HWRM_FW_SET_TIME (0xc8UL)
- #define HWRM_FW_GET_TIME (0xc9UL)
- #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL)
- #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL)
- #define HWRM_FW_IPC_MAILBOX (0xccUL)
- #define HWRM_EXEC_FWD_RESP (0xd0UL)
- #define HWRM_REJECT_FWD_RESP (0xd1UL)
- #define HWRM_FWD_RESP (0xd2UL)
- #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
- #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
- #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
- #define HWRM_WOL_FILTER_FREE (0xf1UL)
- #define HWRM_WOL_FILTER_QCFG (0xf2UL)
- #define HWRM_WOL_REASON_QCFG (0xf3UL)
- #define HWRM_DBG_READ_DIRECT (0xff10UL)
- #define HWRM_DBG_READ_INDIRECT (0xff11UL)
- #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
- #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
- #define HWRM_DBG_DUMP (0xff14UL)
- #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
- #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
- #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
- #define HWRM_NVM_MODIFY (0xfff4UL)
- #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
- #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
- #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
- #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
- #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
- #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
- #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
- #define HWRM_NVM_RAW_DUMP (0xfffcUL)
- #define HWRM_NVM_READ (0xfffdUL)
- #define HWRM_NVM_WRITE (0xfffeUL)
- #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS (0x0UL)
- #define HWRM_ERR_CODE_FAIL (0x1UL)
- #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
- #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
- #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
- #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
- #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
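
Rounding off the renamed async-event records, a sketch of pulling the HWRM
error severity out of event_data2 with the masks above (what the driver
does with the result is omitted):

static u8 sketch_hwrm_err_severity(struct hwrm_async_event_cmpl_hwrm_error *cmpl)
{
        u32 data2 = le32_to_cpu(cmpl->event_data2);

        /* 0 = warning, 1 = non-fatal, 2 = fatal, per the defines above. */
        return (data2 & ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK) >>
               ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT;
}
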
/* hwrm_ver_get */
@@ -1057,6 +756,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1106,6 +806,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1182,6 +883,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
#define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
#define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE 0x200UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1548,6 +1250,7 @@ struct hwrm_func_drv_qver_output {
#define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -2109,31 +1812,6 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
-/* hwrm_port_blink_led */
-/* Input (24 bytes) */
-struct hwrm_port_blink_led_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 num_blinks;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_port_blink_led_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_port_phy_qcaps */
/* Input (24 bytes) */
struct hwrm_port_phy_qcaps_input {
@@ -2355,6 +2033,39 @@ struct hwrm_queue_cfg_output {
u8 valid;
};
+/* hwrm_queue_pfcenable_qcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
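
One property of the new qcfg response worth spelling out: the
PRIn_PFC_ENABLED flags sit in bits 0..7 in priority order, so they map
directly onto the IEEE 802.1Qaz pfc_en priority bitmap.  A fragment under
that assumption (resp is a completed response, pfc a struct ieee_pfc):

/* Bits 0..7 of resp->flags are PRI0..PRI7, matching pfc_en bit order. */
pfc->pfc_en = le32_to_cpu(resp->flags) & 0xff;
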
/* hwrm_queue_pfcenable_cfg */
/* Input (24 bytes) */
struct hwrm_queue_pfcenable_cfg_input {
@@ -2389,6 +2100,48 @@ struct hwrm_queue_pfcenable_cfg_output {
u8 valid;
};
+/* hwrm_queue_pri2cos_qcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* Output (24 bytes) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
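
Similarly for the pri2cos qcfg response: the eight per-priority CoS queue
IDs are consecutive u8 fields, so a caller can index them as an array.  A
sketch assuming that layout (pri_to_cos is a hypothetical output array):

/* Treat pri0..pri7_cos_queue_id as an array indexed by priority. */
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i;

for (i = 0; i < 8; i++)
        pri_to_cos[i] = pri2cos[i];
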
/* hwrm_queue_pri2cos_cfg */
/* Input (40 bytes) */
struct hwrm_queue_pri2cos_cfg_input {
@@ -2439,6 +2192,257 @@ struct hwrm_queue_pri2cos_cfg_output {
u8 valid;
};
+/* hwrm_queue_cos2bw_qcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (112 bytes) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
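Each *_min_bw/*_max_bw word packs a 28-bit bandwidth value plus a 3-bit unit field in the top bits. A hedged sketch of decoding queue 0's minimum bandwidth from a completed response:

	u32 bw = le32_to_cpu(resp->queue_id0_min_bw);
	u32 val = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
	u32 unit = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK;

	if (unit == QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100)
		/* value is in units of 0.01 percent */
		pr_info("min bw: %u.%02u percent\n", val / 100, val % 100);
	else if (unit == QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS)
		pr_info("min bw: %u Mbps\n", val);
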
/* hwrm_queue_cos2bw_cfg */
/* Input (128 bytes) */
struct hwrm_queue_cos2bw_cfg_input {
@@ -3820,7 +3824,9 @@ struct hwrm_stat_ctx_alloc_input {
__le64 resp_addr;
__le64 stats_dma_addr;
__le32 update_period_ms;
- __le32 unused_0;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ u8 unused_0[3];
};
/* Output (16 bytes) */
@@ -4052,7 +4058,9 @@ struct hwrm_fw_set_structured_data_input {
__le64 src_data_addr;
__le16 data_len;
u8 hdr_cnt;
- u8 unused_0[5];
+ u8 unused_0;
+ __le16 port_id;
+ __le16 unused_1;
};
/* Output (16 bytes) */
@@ -4069,7 +4077,7 @@ struct hwrm_fw_set_structured_data_output {
};
/* hwrm_fw_get_structured_data */
-/* Input (32 bytes) */
+/* Input (40 bytes) */
struct hwrm_fw_get_structured_data_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -4089,6 +4097,8 @@ struct hwrm_fw_get_structured_data_input {
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
u8 count;
u8 unused_0;
+ __le16 port_id;
+ __le16 unused_1[3];
};
/* Output (16 bytes) */
@@ -4598,4 +4608,363 @@ struct hwrm_nvm_install_update_output {
u8 valid;
};
+/* Hardware Resource Manager Specification */
+/* Input (16 bytes) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
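Every HWRM request embeds this generic header, so header setup can be written once. A sketch along the lines of the driver's bnxt_hwrm_cmd_hdr_init(); the 0xffff sentinels ("no completion ring", "firmware is the target") and the omitted seq_id assignment are assumptions here:

	static void hwrm_hdr_init(struct input *req, u16 req_type, u64 resp_addr)
	{
		req->req_type = cpu_to_le16(req_type);
		req->cmpl_ring = cpu_to_le16(0xffff);	/* no completion ring */
		req->target_id = cpu_to_le16(0xffff);	/* firmware is the target */
		req->resp_addr = cpu_to_le64(resp_addr);
		/* seq_id is assigned by the sender, omitted in this sketch */
	}
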
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET (0x0UL)
+ #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
+ #define HWRM_FUNC_VF_CFG (0xfUL)
+ #define RESERVED1 (0x10UL)
+ #define HWRM_FUNC_RESET (0x11UL)
+ #define HWRM_FUNC_GETFID (0x12UL)
+ #define HWRM_FUNC_VF_ALLOC (0x13UL)
+ #define HWRM_FUNC_VF_FREE (0x14UL)
+ #define HWRM_FUNC_QCAPS (0x15UL)
+ #define HWRM_FUNC_QCFG (0x16UL)
+ #define HWRM_FUNC_CFG (0x17UL)
+ #define HWRM_FUNC_QSTATS (0x18UL)
+ #define HWRM_FUNC_CLR_STATS (0x19UL)
+ #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
+ #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
+ #define HWRM_FUNC_DRV_RGTR (0x1dUL)
+ #define HWRM_FUNC_DRV_QVER (0x1eUL)
+ #define HWRM_FUNC_BUF_RGTR (0x1fUL)
+ #define HWRM_PORT_PHY_CFG (0x20UL)
+ #define HWRM_PORT_MAC_CFG (0x21UL)
+ #define HWRM_PORT_TS_QUERY (0x22UL)
+ #define HWRM_PORT_QSTATS (0x23UL)
+ #define HWRM_PORT_LPBK_QSTATS (0x24UL)
+ #define HWRM_PORT_CLR_STATS (0x25UL)
+ #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
+ #define HWRM_PORT_PHY_QCFG (0x27UL)
+ #define HWRM_PORT_MAC_QCFG (0x28UL)
+ #define RESERVED7 (0x29UL)
+ #define HWRM_PORT_PHY_QCAPS (0x2aUL)
+ #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
+ #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
+ #define HWRM_PORT_LED_CFG (0x2dUL)
+ #define HWRM_PORT_LED_QCFG (0x2eUL)
+ #define HWRM_PORT_LED_QCAPS (0x2fUL)
+ #define HWRM_QUEUE_QPORTCFG (0x30UL)
+ #define HWRM_QUEUE_QCFG (0x31UL)
+ #define HWRM_QUEUE_CFG (0x32UL)
+ #define RESERVED2 (0x33UL)
+ #define RESERVED3 (0x34UL)
+ #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
+ #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
+ #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
+ #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
+ #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
+ #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
+ #define HWRM_VNIC_ALLOC (0x40UL)
+ #define HWRM_VNIC_FREE (0x41UL)
+ #define HWRM_VNIC_CFG (0x42UL)
+ #define HWRM_VNIC_QCFG (0x43UL)
+ #define HWRM_VNIC_TPA_CFG (0x44UL)
+ #define HWRM_VNIC_TPA_QCFG (0x45UL)
+ #define HWRM_VNIC_RSS_CFG (0x46UL)
+ #define HWRM_VNIC_RSS_QCFG (0x47UL)
+ #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
+ #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_VNIC_QCAPS (0x4aUL)
+ #define HWRM_RING_ALLOC (0x50UL)
+ #define HWRM_RING_FREE (0x51UL)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
+ #define HWRM_RING_RESET (0x5eUL)
+ #define HWRM_RING_GRP_ALLOC (0x60UL)
+ #define HWRM_RING_GRP_FREE (0x61UL)
+ #define RESERVED5 (0x64UL)
+ #define RESERVED6 (0x65UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
+ #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
+ #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
+ #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
+ #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
+ #define RESERVED4 (0x94UL)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
+ #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
+ #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
+ #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
+ #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
+ #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
+ #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
+ #define HWRM_STAT_CTX_ALLOC (0xb0UL)
+ #define HWRM_STAT_CTX_FREE (0xb1UL)
+ #define HWRM_STAT_CTX_QUERY (0xb2UL)
+ #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
+ #define HWRM_FW_RESET (0xc0UL)
+ #define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_FW_SET_TIME (0xc8UL)
+ #define HWRM_FW_GET_TIME (0xc9UL)
+ #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL)
+ #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL)
+ #define HWRM_FW_IPC_MAILBOX (0xccUL)
+ #define HWRM_EXEC_FWD_RESP (0xd0UL)
+ #define HWRM_REJECT_FWD_RESP (0xd1UL)
+ #define HWRM_FWD_RESP (0xd2UL)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
+ #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
+ #define HWRM_WOL_FILTER_FREE (0xf1UL)
+ #define HWRM_WOL_FILTER_QCFG (0xf2UL)
+ #define HWRM_WOL_REASON_QCFG (0xf3UL)
+ #define HWRM_DBG_READ_DIRECT (0xff10UL)
+ #define HWRM_DBG_READ_INDIRECT (0xff11UL)
+ #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
+ #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
+ #define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
+ #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
+ #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
+ #define HWRM_NVM_MODIFY (0xfff4UL)
+ #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
+ #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
+ #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
+ #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
+ #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
+ #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
+ #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
+ #define HWRM_NVM_RAW_DUMP (0xfffcUL)
+ #define HWRM_NVM_READ (0xfffdUL)
+ #define HWRM_NVM_WRITE (0xfffeUL)
+ #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS (0x0UL)
+ #define HWRM_ERR_CODE_FAIL (0x1UL)
+ #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
+ #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
+ #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
+ #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
+ __le16 unused_0[3];
+};
+
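Callers typically fold these firmware codes into errno values; a hypothetical mapping (the grouping is illustrative, not the driver's actual policy):

	static int hwrm_err_to_errno(u16 err)
	{
		switch (err) {
		case HWRM_ERR_CODE_SUCCESS:
			return 0;
		case HWRM_ERR_CODE_INVALID_PARAMS:
		case HWRM_ERR_CODE_INVALID_FLAGS:
		case HWRM_ERR_CODE_INVALID_ENABLES:
			return -EINVAL;
		case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
			return -EACCES;
		case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
			return -ENOMEM;
		case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
			return -EOPNOTSUPP;
		default:
			return -EIO;
		}
	}
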
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* Periodic Statistics Context DMA to host (160 bytes) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* Structure data header (16 bytes) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL
+ __le16 len;
+ u8 version;
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ __le16 unused_0[3];
+};
+
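Structured-data buffers can chain several of these headers; next_offset gives the byte offset to the next header and STRUCT_HDR_NEXT_OFFSET_LAST (0) ends the chain. A hedged walk, assuming the offset is relative to the current header:

	struct hwrm_struct_hdr *hdr = buf;

	for (;;) {
		u16 next = le16_to_cpu(hdr->next_offset);

		/* consume hdr->struct_id and its payload here */
		if (next == STRUCT_HDR_NEXT_OFFSET_LAST)
			break;
		hdr = (void *)hdr + next;
	}
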
+/* DCBX Application configuration structure (8 bytes) */
+struct hwrm_struct_data_dcbx_app_cfg {
+ __le16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 60e2af8678bd..c69602508666 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -34,8 +34,7 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
/* broadcast this async event to all VFs */
req.encap_async_event_target_id = cpu_to_le16(0xffff);
async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
- async_cmpl->type =
- cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -288,7 +287,7 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
}
if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
}
@@ -421,15 +420,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	/* Remaining rings are distributed equally amongst VFs for now */
- /* TODO: the following workaroud is needed to restrict total number
- * of vf_cp_rings not exceed number of HW ring groups. This WA should
- * be removed once new HWRM provides HW ring groups capability in
- * hwrm_func_qcap.
- */
- vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
- vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
- /* TODO: restore this logic below once the WA above is removed */
- /* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
+ vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -578,8 +569,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
if (pci_vfs_assigned(bp->pdev)) {
bnxt_hwrm_fwd_async_event_cmpl(
- bp, NULL,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
+ bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
num_vfs);
} else {
@@ -592,7 +582,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
bp->pf.active_vfs = 0;
/* Reclaim all resources for the PF. */
- bnxt_hwrm_func_qcaps(bp);
+ rtnl_lock();
+ bnxt_restore_pf_fw_resources(bp);
+ rtnl_unlock();
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
new file mode 100644
index 000000000000..8b7464b76501
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -0,0 +1,346 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <asm/byteorder.h>
+#include <linux/bitmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+
+static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_ulp_ops *ulp_ops, void *handle)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+
+ ASSERT_RTNL();
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ ulp = &edev->ulp_tbl[ulp_id];
+ if (rcu_access_pointer(ulp->ulp_ops)) {
+ netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
+ return -EBUSY;
+ }
+ if (ulp_id == BNXT_ROCE_ULP) {
+ unsigned int max_stat_ctxs;
+
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+ bp->num_stat_ctxs == max_stat_ctxs)
+ return -ENOMEM;
+ bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
+ BNXT_MIN_ROCE_STAT_CTXS);
+ }
+
+ atomic_set(&ulp->ref_count, 0);
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
+
+ if (ulp_id == BNXT_ROCE_ULP) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_hwrm_vnic_cfg(bp, 0);
+ }
+
+ return 0;
+}
+
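A hedged sketch of the matching call from a ULP driver; my_ulp_ops, my_ulp_stop/my_ulp_start and my_handle are assumptions, and RTNL must be held because of the ASSERT_RTNL() above:

	static struct bnxt_ulp_ops my_ulp_ops = {
		.ulp_stop	= my_ulp_stop,
		.ulp_start	= my_ulp_start,
	};

	rtnl_lock();
	rc = edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
						&my_ulp_ops, my_handle);
	rtnl_unlock();
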
+static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+ int i = 0;
+
+ ASSERT_RTNL();
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ ulp = &edev->ulp_tbl[ulp_id];
+ if (!rcu_access_pointer(ulp->ulp_ops)) {
+ netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
+ return -EINVAL;
+ }
+ if (ulp_id == BNXT_ROCE_ULP) {
+ unsigned int max_stat_ctxs;
+
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+ }
+ if (ulp->max_async_event_id)
+ bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
+ while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+ msleep(100);
+ i++;
+ }
+ return 0;
+}
+
+static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_msix_entry *ent, int num_msix)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ int max_idx, max_cp_rings;
+ int avail_msix, i, idx;
+
+ ASSERT_RTNL();
+ if (ulp_id != BNXT_ROCE_ULP)
+ return -EINVAL;
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ return -ENODEV;
+
+ max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+ max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+ avail_msix = max_idx - bp->cp_nr_rings;
+ if (!avail_msix)
+ return -ENOMEM;
+ if (avail_msix > num_msix)
+ avail_msix = num_msix;
+
+ idx = max_idx - avail_msix;
+ for (i = 0; i < avail_msix; i++) {
+ ent[i].vector = bp->irq_tbl[idx + i].vector;
+ ent[i].ring_idx = idx + i;
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
+ bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ return avail_msix;
+}
+
+static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ int max_cp_rings, msix_requested;
+
+ ASSERT_RTNL();
+ if (ulp_id != BNXT_ROCE_ULP)
+ return -EINVAL;
+
+ max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+ msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+ bnxt_set_max_func_irqs(bp, bp->total_irqs);
+ return 0;
+}
+
+void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
+{
+ ASSERT_RTNL();
+ if (bnxt_ulp_registered(bp->edev, ulp_id)) {
+ struct bnxt_en_dev *edev = bp->edev;
+ unsigned int msix_req, max;
+
+ msix_req = edev->ulp_tbl[ulp_id].msix_requested;
+ max = bnxt_get_max_func_cp_rings(bp);
+ bnxt_set_max_func_cp_rings(bp, max - msix_req);
+ max = bnxt_get_max_func_stat_ctxs(bp);
+ bnxt_set_max_func_stat_ctxs(bp, max - 1);
+ }
+}
+
+static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_fw_msg *fw_msg)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct input *req;
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ req = fw_msg->msg;
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+ rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
+ fw_msg->timeout);
+ if (!rc) {
+ struct output *resp = bp->hwrm_cmd_resp_addr;
+ u32 len = le16_to_cpu(resp->resp_len);
+
+ if (fw_msg->resp_max_len < len)
+ len = fw_msg->resp_max_len;
+
+ memcpy(fw_msg->resp, resp, len);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
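From the ULP side, bnxt_send_msg() is reached through the en_ops table. A hedged usage sketch; my_req/my_resp and the zero timeout (which falls back to the driver's default) are assumptions:

	struct bnxt_fw_msg fw_msg = {
		.msg		= &my_req,
		.msg_len	= sizeof(my_req),
		.resp		= &my_resp,
		.resp_max_len	= sizeof(my_resp),
		.timeout	= 0,	/* 0: use the default HWRM timeout */
	};

	rc = edev->en_ops->bnxt_send_fw_msg(edev, BNXT_ROCE_ULP, &fw_msg);
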
+static void bnxt_ulp_get(struct bnxt_ulp *ulp)
+{
+ atomic_inc(&ulp->ref_count);
+}
+
+static void bnxt_ulp_put(struct bnxt_ulp *ulp)
+{
+ atomic_dec(&ulp->ref_count);
+}
+
+void bnxt_ulp_stop(struct bnxt *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_stop)
+ continue;
+ ops->ulp_stop(ulp->handle);
+ }
+}
+
+void bnxt_ulp_start(struct bnxt *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_start)
+ continue;
+ ops->ulp_start(ulp->handle);
+ }
+}
+
+void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ rcu_read_lock();
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_sriov_config) {
+ rcu_read_unlock();
+ continue;
+ }
+ bnxt_ulp_get(ulp);
+ rcu_read_unlock();
+ ops->ulp_sriov_config(ulp->handle, num_vfs);
+ bnxt_ulp_put(ulp);
+ }
+}
+
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ continue;
+ if (!ulp->async_events_bmap ||
+ event_id > ulp->max_async_event_id)
+ continue;
+
+		/* Read max_async_event_id before testing the bitmap. */
+ smp_rmb();
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+ }
+ rcu_read_unlock();
+}
+
+static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
+ unsigned long *events_bmap, u16 max_id)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ ulp = &edev->ulp_tbl[ulp_id];
+ ulp->async_events_bmap = events_bmap;
+	/* Make the bitmap visible before max_async_event_id is updated */
+ smp_wmb();
+ ulp->max_async_event_id = max_id;
+ bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
+ return 0;
+}
+
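A ULP passes in a long-lived bitmap with the event IDs it cares about set; a hedged example using the link status change event (the bitmap size is an assumption):

	static unsigned long events_bmap[BITS_TO_LONGS(256)];

	__set_bit(ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, events_bmap);
	edev->en_ops->bnxt_register_fw_async_events(edev, BNXT_ROCE_ULP,
			events_bmap,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
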
+static const struct bnxt_en_ops bnxt_en_ops_tbl = {
+ .bnxt_register_device = bnxt_register_dev,
+ .bnxt_unregister_device = bnxt_unregister_dev,
+ .bnxt_request_msix = bnxt_req_msix_vecs,
+ .bnxt_free_msix = bnxt_free_msix_vecs,
+ .bnxt_send_fw_msg = bnxt_send_msg,
+ .bnxt_register_fw_async_events = bnxt_register_async_events,
+};
+
+struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_en_dev *edev;
+
+ edev = bp->edev;
+ if (!edev) {
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return ERR_PTR(-ENOMEM);
+ edev->en_ops = &bnxt_en_ops_tbl;
+ if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
+ edev->net = dev;
+ edev->pdev = bp->pdev;
+ bp->edev = edev;
+ }
+ return bp->edev;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
new file mode 100644
index 000000000000..74f816e46a33
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -0,0 +1,93 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ULP_H
+#define BNXT_ULP_H
+
+#define BNXT_ROCE_ULP 0
+#define BNXT_OTHER_ULP 1
+#define BNXT_MAX_ULP 2
+
+#define BNXT_MIN_ROCE_CP_RINGS 2
+#define BNXT_MIN_ROCE_STAT_CTXS 1
+
+struct hwrm_async_event_cmpl;
+struct bnxt;
+
+struct bnxt_ulp_ops {
+	/* async_notifier() is called in BH context and must not sleep */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_stop)(void *);
+ void (*ulp_start)(void *);
+ void (*ulp_sriov_config)(void *, int);
+};
+
+struct bnxt_msix_entry {
+ u32 vector;
+ u32 ring_idx;
+ u32 db_offset;
+};
+
+struct bnxt_fw_msg {
+ void *msg;
+ int msg_len;
+ void *resp;
+ int resp_max_len;
+ int timeout;
+};
+
+struct bnxt_ulp {
+ void *handle;
+ struct bnxt_ulp_ops __rcu *ulp_ops;
+ unsigned long *async_events_bmap;
+ u16 max_async_event_id;
+ u16 msix_requested;
+ atomic_t ref_count;
+};
+
+struct bnxt_en_dev {
+ struct net_device *net;
+ struct pci_dev *pdev;
+ u32 flags;
+ #define BNXT_EN_FLAG_ROCEV1_CAP 0x1
+ #define BNXT_EN_FLAG_ROCEV2_CAP 0x2
+ #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
+ BNXT_EN_FLAG_ROCEV2_CAP)
+ const struct bnxt_en_ops *en_ops;
+ struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+};
+
+struct bnxt_en_ops {
+ int (*bnxt_register_device)(struct bnxt_en_dev *, int,
+ struct bnxt_ulp_ops *, void *);
+ int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
+ int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
+ struct bnxt_msix_entry *, int);
+ int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
+ int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
+ struct bnxt_fw_msg *);
+ int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
+ unsigned long *, u16);
+};
+
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+{
+ if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ return true;
+ return false;
+}
+
+void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
+void bnxt_ulp_stop(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
+struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 6e723662ee69..ce7de6f72512 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1530,15 +1530,14 @@ static const struct net_device_ops xgmac_netdev_ops = {
.ndo_set_features = xgmac_set_features,
};
-static int xgmac_ethtool_getsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int xgmac_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->autoneg = 0;
- cmd->duplex = DUPLEX_FULL;
- ethtool_cmd_speed_set(cmd, 10000);
- cmd->supported = 0;
- cmd->advertising = 0;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.autoneg = 0;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = 10000;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 0);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 0);
return 0;
}
@@ -1681,7 +1680,6 @@ static int xgmac_set_wol(struct net_device *dev,
}
static const struct ethtool_ops xgmac_ethtool_ops = {
- .get_settings = xgmac_ethtool_getsettings,
.get_link = ethtool_op_get_link,
.get_pauseparam = xgmac_get_pauseparam,
.set_pauseparam = xgmac_set_pauseparam,
@@ -1690,6 +1688,7 @@ static const struct ethtool_ops xgmac_ethtool_ops = {
.get_wol = xgmac_get_wol,
.set_wol = xgmac_set_wol,
.get_sset_count = xgmac_get_sset_count,
+ .get_link_ksettings = xgmac_ethtool_get_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 9119af088821..a1de0d12927d 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -468,6 +468,9 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
struct device *dev = ep->dev->dev.parent;
int i;
+ if (!ep->descs)
+ return;
+
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
@@ -490,6 +493,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
ep->descs_dma_addr);
+ ep->descs = NULL;
}
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1fb5d7239254..0e74529a4209 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -90,7 +90,8 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
{
OPCODE_COMMON_SET_HSW_CONFIG,
CMD_SUBSYSTEM_COMMON,
- BE_PRIV_DEVCFG | BE_PRIV_VHADM
+ BE_PRIV_DEVCFG | BE_PRIV_VHADM |
+ BE_PRIV_DEVSEC
},
{
OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 6456c180114b..45abc81f6f55 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include <linux/module.h>
#include <net/ethoc.h>
@@ -221,6 +222,9 @@ struct ethoc {
struct mii_bus *mdio;
struct clk *clk;
s8 phy_id;
+
+ int old_link;
+ int old_duplex;
};
/**
@@ -572,7 +576,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
/* We always handle the dropped packet interrupt */
if (pending & INT_MASK_BUSY) {
- dev_err(&dev->dev, "packet dropped\n");
+ dev_dbg(&dev->dev, "packet dropped\n");
dev->stats.rx_dropped++;
}
@@ -667,6 +671,32 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
static void ethoc_mdio_poll(struct net_device *dev)
{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ bool changed = false;
+ u32 mode;
+
+ if (priv->old_link != phydev->link) {
+ changed = true;
+ priv->old_link = phydev->link;
+ }
+
+ if (priv->old_duplex != phydev->duplex) {
+ changed = true;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ if (!changed)
+ return;
+
+ mode = ethoc_read(priv, MODER);
+ if (phydev->duplex == DUPLEX_FULL)
+ mode |= MODER_FULLD;
+ else
+ mode &= ~MODER_FULLD;
+ ethoc_write(priv, MODER, mode);
+
+ phy_print_status(phydev);
}
static int ethoc_mdio_probe(struct net_device *dev)
@@ -685,6 +715,9 @@ static int ethoc_mdio_probe(struct net_device *dev)
return -ENXIO;
}
+ priv->old_duplex = -1;
+ priv->old_link = -1;
+
err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
PHY_INTERFACE_MODE_GMII);
if (err) {
@@ -721,6 +754,9 @@ static int ethoc_open(struct net_device *dev)
netif_start_queue(dev);
}
+ priv->old_link = -1;
+ priv->old_duplex = -1;
+
phy_start(dev->phydev);
napi_enable(&priv->napi);
@@ -1123,11 +1159,9 @@ static int ethoc_probe(struct platform_device *pdev)
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
- const uint8_t *mac;
+ const void *mac;
- mac = of_get_property(pdev->dev.of_node,
- "local-mac-address",
- NULL);
+ mac = of_get_mac_address(pdev->dev.of_node);
if (mac)
memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
priv->phy_id = -1;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ecaa7a90b7c8..38160c2bebcb 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2310,6 +2310,8 @@ static const struct fec_stat {
{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};
+#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
+
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
@@ -2327,7 +2329,7 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
if (netif_running(dev))
fec_enet_update_ethtool_stats(dev);
- memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64));
+ memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}
static void fec_enet_get_strings(struct net_device *netdev,
@@ -2352,6 +2354,12 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
}
+
+#else /* !defined(CONFIG_M5272) */
+#define FEC_STATS_SIZE 0
+static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
+{
+}
#endif /* !defined(CONFIG_M5272) */
/* ITR clock source is enet system clock (clk_ahb).
@@ -3279,8 +3287,7 @@ fec_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
- ARRAY_SIZE(fec_stats) * sizeof(u64),
- num_tx_qs, num_rx_qs);
+ FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e69a6bed31a9..ee7e9ce2f5b3 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -11,8 +11,10 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
@@ -183,12 +185,28 @@
#define DESC_DATA_LEN_OFF 16
#define DESC_BUFF_LEN_OFF 0
#define DESC_DATA_MASK 0x7ff
+#define DESC_SG BIT(30)
+#define DESC_FRAGS_NUM_OFF 11
/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
#define dma_cnt(n) ((n) >> 5)
#define dma_byte(n) ((n) << 5)
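These helpers assume a power-of-two ring size (the masked increment wraps for free) and 32-byte hardware descriptor slots (the shift by 5). For example, with a 64-entry ring:

	/* dma_ring_incr(63, 64) == 0	wraps without a conditional
	 * dma_byte(3) == 96		descriptor index -> byte offset
	 * dma_cnt(96) == 3		byte offset -> descriptor index
	 */
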
+#define HW_CAP_TSO BIT(0)
+#define GEMAC_V1 0
+#define GEMAC_V2 (GEMAC_V1 | HW_CAP_TSO)
+#define HAS_CAP_TSO(hw_cap) ((hw_cap) & HW_CAP_TSO)
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
+
struct hix5hd2_desc {
__le32 buff_addr;
__le32 cmd;
@@ -201,6 +219,27 @@ struct hix5hd2_desc_sw {
unsigned int size;
};
+struct hix5hd2_sg_desc_ring {
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+};
+
+struct frags_info {
+ __le32 addr;
+ __le32 size;
+};
+
+/* maximum number of skb frags supported by the hardware */
+#define SG_MAX_SKB_FRAGS 17
+struct sg_desc {
+ __le32 total_len;
+ __le32 resvd0;
+ __le32 linear_addr;
+ __le32 linear_len;
+	/* reserve one more frag for memory alignment */
+ struct frags_info frags[SG_MAX_SKB_FRAGS + 1];
+};
+
#define QUEUE_NUMS 4
struct hix5hd2_priv {
struct hix5hd2_desc_sw pool[QUEUE_NUMS];
@@ -208,6 +247,7 @@ struct hix5hd2_priv {
#define rx_bq pool[1]
#define tx_bq pool[2]
#define tx_rq pool[3]
+ struct hix5hd2_sg_desc_ring tx_ring;
void __iomem *base;
void __iomem *ctrl_base;
@@ -221,15 +261,30 @@ struct hix5hd2_priv {
struct device_node *phy_node;
phy_interface_t phy_mode;
+ unsigned long hw_cap;
unsigned int speed;
unsigned int duplex;
- struct clk *clk;
+ struct clk *mac_core_clk;
+ struct clk *mac_ifc_clk;
+ struct reset_control *mac_core_rst;
+ struct reset_control *mac_ifc_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
struct mii_bus *bus;
struct napi_struct napi;
struct work_struct tx_timeout_task;
};
+static inline void hix5hd2_mac_interface_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_ifc_rst)
+ return;
+
+ reset_control_assert(priv->mac_ifc_rst);
+ reset_control_deassert(priv->mac_ifc_rst);
+}
+
static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
@@ -262,6 +317,7 @@ static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
if (duplex)
val |= GMAC_FULL_DUPLEX;
writel_relaxed(val, priv->ctrl_base);
+ hix5hd2_mac_interface_reset(priv);
writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
if (speed == SPEED_1000)
@@ -511,6 +567,27 @@ next:
return num;
}
+static void hix5hd2_clean_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ u32 len;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ addr = le32_to_cpu(desc->linear_addr);
+ len = le32_to_cpu(desc->linear_len);
+ dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ addr = le32_to_cpu(desc->frags[i].addr);
+ len = le32_to_cpu(desc->frags[i].size);
+ dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
+ }
+}
+
static void hix5hd2_xmit_reclaim(struct net_device *dev)
{
struct sk_buff *skb;
@@ -538,8 +615,15 @@ static void hix5hd2_xmit_reclaim(struct net_device *dev)
pkts_compl++;
bytes_compl += skb->len;
desc = priv->tx_rq.desc + pos;
- addr = le32_to_cpu(desc->buff_addr);
- dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+
+ if (skb_shinfo(skb)->nr_frags) {
+ hix5hd2_clean_sg_desc(priv, skb, pos);
+ } else {
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len,
+ DMA_TO_DEVICE);
+ }
+
priv->tx_skb[pos] = NULL;
dev_consume_skb_any(skb);
pos = dma_ring_incr(pos, TX_DESC_NUM);
@@ -600,12 +684,66 @@ static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static u32 hix5hd2_get_desc_cmd(struct sk_buff *skb, unsigned long hw_cap)
+{
+ u32 cmd = 0;
+
+ if (HAS_CAP_TSO(hw_cap)) {
+ if (skb_shinfo(skb)->nr_frags)
+ cmd |= DESC_SG;
+ cmd |= skb_shinfo(skb)->nr_frags << DESC_FRAGS_NUM_OFF;
+ } else {
+ cmd |= DESC_FL_FULL |
+ ((skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
+ }
+
+ cmd |= (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF;
+ cmd |= DESC_VLD_BUSY;
+
+ return cmd;
+}
+
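The offsets above imply this TX command word layout (a sketch; the DESC_VLD_BUSY and DESC_FL_FULL bit positions are defined elsewhere in the driver):

	/* [30]    DESC_SG         scatter-gather (TSO-capable MACs)
	 * [26:16] data length     (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF
	 * [15:11] fragment count  nr_frags << DESC_FRAGS_NUM_OFF
	 * [10:0]  buffer length   non-SG case, via DESC_BUFF_LEN_OFF
	 */
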
+static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ int ret;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ desc->total_len = cpu_to_le32(skb->len);
+ addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr)))
+ return -EINVAL;
+ desc->linear_addr = cpu_to_le32(addr);
+ desc->linear_len = cpu_to_le32(skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = frag->size;
+
+ addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(priv->dev, addr);
+ if (unlikely(ret))
+ return -EINVAL;
+ desc->frags[i].addr = cpu_to_le32(addr);
+ desc->frags[i].size = cpu_to_le32(len);
+ }
+
+ return 0;
+}
+
static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
struct hix5hd2_desc *desc;
dma_addr_t addr;
u32 pos;
+ u32 cmd;
+ int ret;
/* software write pointer */
pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
@@ -616,18 +754,31 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, addr)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
desc = priv->tx_bq.desc + pos;
+
+ cmd = hix5hd2_get_desc_cmd(skb, priv->hw_cap);
+ desc->cmd = cpu_to_le32(cmd);
+
+ if (skb_shinfo(skb)->nr_frags) {
+ ret = hix5hd2_fill_sg_desc(priv, skb, pos);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ addr = priv->tx_ring.phys_addr + pos * sizeof(struct sg_desc);
+ } else {
+ addr = dma_map_single(priv->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
desc->buff_addr = cpu_to_le32(addr);
+
priv->tx_skb[pos] = skb;
- desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
- (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
- (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
/* ensure desc updated */
wmb();
@@ -681,16 +832,26 @@ static int hix5hd2_net_open(struct net_device *dev)
struct phy_device *phy;
int ret;
- ret = clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->mac_core_clk);
+ if (ret < 0) {
+ netdev_err(dev, "failed to enable mac core clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
if (ret < 0) {
- netdev_err(dev, "failed to enable clk %d\n", ret);
+ clk_disable_unprepare(priv->mac_core_clk);
+ netdev_err(dev, "failed to enable mac ifc clk %d\n", ret);
return ret;
}
phy = of_phy_connect(dev, priv->phy_node,
&hix5hd2_adjust_link, 0, priv->phy_mode);
- if (!phy)
+ if (!phy) {
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
return -ENODEV;
+ }
phy_start(phy);
hix5hd2_hw_init(priv);
@@ -721,7 +882,8 @@ static int hix5hd2_net_close(struct net_device *dev)
phy_disconnect(dev->phydev);
}
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
return 0;
}
@@ -862,10 +1024,82 @@ error_free_pool:
return -ENOMEM;
}
+static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+
+ desc = (struct sg_desc *)dma_alloc_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ &phys_addr, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ priv->tx_ring.desc = desc;
+ priv->tx_ring.phys_addr = phys_addr;
+
+ return 0;
+}
+
+static void hix5hd2_destroy_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ if (priv->tx_ring.desc) {
+ dma_free_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ priv->tx_ring.desc, priv->tx_ring.phys_addr);
+ priv->tx_ring.desc = NULL;
+ }
+}
+
+static inline void hix5hd2_mac_core_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_core_rst)
+ return;
+
+ reset_control_assert(priv->mac_core_rst);
+ reset_control_deassert(priv->mac_core_rst);
+}
+
+static void hix5hd2_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
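Two hypothetical delay values and the branch each takes (the numbers are illustrative only, not taken from the binding):

    hix5hd2_sleep_us(10);    /* DIV_ROUND_UP(10, 1000) = 1 ms, under 20 ms,
                              * so usleep_range(10, 510) */
    hix5hd2_sleep_us(30000); /* rounds to 30 ms, not under 20 ms,
                              * so msleep(30) */
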
+static void hix5hd2_phy_reset(struct hix5hd2_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds,
+ * we must first put the PHY into the deasserted state and
+ * only then run the full assert/deassert reset sequence
+ */
+ reset_control_deassert(priv->phy_rst);
+ hix5hd2_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* delay long enough for the reset to take effect;
+ * the required time depends on the PHY hardware
+ */
+ hix5hd2_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* delay so that the PHY is ready for the MDIO accesses that follow */
+ hix5hd2_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static const struct of_device_id hix5hd2_of_match[];
+
static int hix5hd2_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
+ const struct of_device_id *of_id = NULL;
struct net_device *ndev;
struct hix5hd2_priv *priv;
struct resource *res;
@@ -883,6 +1117,13 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
priv->dev = dev;
priv->netdev = ndev;
+ of_id = of_match_device(hix5hd2_of_match, dev);
+ if (!of_id) {
+ ret = -EINVAL;
+ goto out_free_netdev;
+ }
+ priv->hw_cap = (unsigned long)of_id->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->base)) {
@@ -897,23 +1138,55 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto out_free_netdev;
}
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- netdev_err(ndev, "failed to get clk\n");
+ priv->mac_core_clk = devm_clk_get(&pdev->dev, "mac_core");
+ if (IS_ERR(priv->mac_core_clk)) {
+ netdev_err(ndev, "failed to get mac core clk\n");
ret = -ENODEV;
goto out_free_netdev;
}
- ret = clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->mac_core_clk);
if (ret < 0) {
- netdev_err(ndev, "failed to enable clk %d\n", ret);
+ netdev_err(ndev, "failed to enable mac core clk %d\n", ret);
goto out_free_netdev;
}
+ priv->mac_ifc_clk = devm_clk_get(&pdev->dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_clk))
+ priv->mac_ifc_clk = NULL;
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable mac ifc clk %d\n", ret);
+ goto out_disable_mac_core_clk;
+ }
+
+ priv->mac_core_rst = devm_reset_control_get(dev, "mac_core");
+ if (IS_ERR(priv->mac_core_rst))
+ priv->mac_core_rst = NULL;
+ hix5hd2_mac_core_reset(priv);
+
+ priv->mac_ifc_rst = devm_reset_control_get(dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_rst))
+ priv->mac_ifc_rst = NULL;
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hix5hd2_phy_reset(priv);
+ }
+
bus = mdiobus_alloc();
if (bus == NULL) {
ret = -ENOMEM;
- goto out_free_netdev;
+ goto out_disable_clk;
}
bus->priv = priv;
@@ -972,22 +1245,38 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
ndev->ethtool_ops = &hix5hd2_ethtools_ops;
SET_NETDEV_DEV(ndev, dev);
+ if (HAS_CAP_TSO(priv->hw_cap))
+ ndev->hw_features |= NETIF_F_SG;
+
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->vlan_features |= ndev->features;
+
ret = hix5hd2_init_hw_desc_queue(priv);
if (ret)
goto out_phy_node;
netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+
+ if (HAS_CAP_TSO(priv->hw_cap)) {
+ ret = hix5hd2_init_sg_desc_queue(priv);
+ if (ret)
+ goto out_destroy_queue;
+ }
+
ret = register_netdev(priv->netdev);
if (ret) {
netdev_err(ndev, "register_netdev failed!");
goto out_destroy_queue;
}
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
return ret;
out_destroy_queue:
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
netif_napi_del(&priv->napi);
hix5hd2_destroy_hw_desc_queue(priv);
out_phy_node:
@@ -996,6 +1285,10 @@ err_mdiobus:
mdiobus_unregister(bus);
err_free_mdio:
mdiobus_free(bus);
+out_disable_clk:
+ clk_disable_unprepare(priv->mac_ifc_clk);
+out_disable_mac_core_clk:
+ clk_disable_unprepare(priv->mac_core_clk);
out_free_netdev:
free_netdev(ndev);
@@ -1012,6 +1305,8 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
mdiobus_unregister(priv->bus);
mdiobus_free(priv->bus);
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
hix5hd2_destroy_hw_desc_queue(priv);
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
@@ -1021,7 +1316,10 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id hix5hd2_of_match[] = {
- {.compatible = "hisilicon,hix5hd2-gmac",},
+ { .compatible = "hisilicon,hisi-gemac-v1", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hisi-gemac-v2", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hix5hd2-gemac", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hi3798cv200-gemac", .data = (void *)GEMAC_V2 },
{},
};
@@ -1029,7 +1327,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
static struct platform_driver hix5hd2_dev_driver = {
.driver = {
- .name = "hix5hd2-gmac",
+ .name = "hisi-gemac",
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
@@ -1038,6 +1336,6 @@ static struct platform_driver hix5hd2_dev_driver = {
module_platform_driver(hix5hd2_dev_driver);
-MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
+MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hix5hd2-gmac");
+MODULE_ALIAS("platform:hisi-gemac");
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 09602f1187f5..8016854796fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -99,6 +99,8 @@ enum hnae_led_state {
#define HNS_RX_FLAG_L3ID_IPV6 0x1
#define HNS_RX_FLAG_L4ID_UDP 0x0
#define HNS_RX_FLAG_L4ID_TCP 0x1
+#define HNS_RX_FLAG_L4ID_SCTP 0x3
+
#define HNS_TXD_ASID_S 0
#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 776d81e785d8..672b64606321 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -566,6 +566,71 @@ static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
+static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb, u32 flag)
+{
+ struct net_device *netdev = ring_data->napi.dev;
+ u32 l3id;
+ u32 l4id;
+
+ /* check if RX checksum offload is enabled */
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ return;
+
+ /* In hardware, we only support checksum for the following protocols:
+ * 1) IPv4,
+ * 2) TCP (over IPv4 or IPv6),
+ * 3) UDP (over IPv4 or IPv6),
+ * 4) SCTP (over IPv4 or IPv6)
+ * but we support many L3 (IPv4, IPv6, MPLS, PPPoE, etc.) and L4 (TCP,
+ * UDP, GRE, SCTP, IGMP, ICMP, etc.) protocols.
+ *
+ * Hardware limitation:
+ * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
+ * Error" bit (which usually can be used to indicate whether checksum
+ * was calculated by the hardware and if there was any error encountered
+ * during checksum calculation).
+ *
+ * Software workaround:
+ * We do get info within the RX descriptor about the kind of L3/L4
+ * protocol coming in the packet and the error status. These errors
+ * might not just be checksum errors but could be related to version,
+ * length of IPv4, UDP, TCP etc.
+ * Because there is no way of knowing if it is an L3/L4 error due to a bad
+ * checksum or any other L3/L4 error, we will not (cannot) convey
+ * checksum status for such cases to the upper stack and will not maintain
+ * the RX L3/L4 checksum counters as well.
+ */
+
+ l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
+ l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
+
+ /* check L3 protocol for which checksum is supported */
+ if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
+ return;
+
+ /* check for any (not just checksum) flagged L3 protocol errors */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
+ return;
+
+ /* we do not support checksum of fragmented packets */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
+ return;
+
+ /* check L4 protocol for which checksum is supported */
+ if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
+ (l4id != HNS_RX_FLAG_L4ID_UDP) &&
+ (l4id != HNS_RX_FLAG_L4ID_SCTP))
+ return;
+
+ /* check for any (not just checksum) flagged L4 protocol errors */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
+ return;
+
+ /* now, this has to be a packet with valid RX checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff **out_skb, int *out_bnum)
{
@@ -684,13 +749,10 @@ out_bnum_err:
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
- if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
- hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
- ring->stats.l3l4_csum_err++;
- return 0;
- }
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* indicate to the upper stack whether the hardware has already
+ * verified the RX checksum
+ */
+ hns_nic_rx_checksum(ring_data, skb, bnum_flag);
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4cb8fb3dcb34..ba8d30984bee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -356,10 +356,11 @@ struct i40e_pf {
#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45)
#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46)
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
-#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
+#define I40E_FLAG_PHY_CONTROLS_LEDS BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
+#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -596,6 +597,7 @@ struct i40e_vsi {
u16 veb_idx; /* index of VEB parent */
struct kobject *kobj; /* sysfs object */
bool current_isup; /* Sync 'link up' logging */
+ enum i40e_aq_link_speed current_speed; /* Sync link speed logging */
void *priv; /* client driver data reference. */
@@ -850,7 +852,9 @@ int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
const u8 *macaddr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 67e396b2b347..b2101a51534c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1642,6 +1642,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1650,6 +1654,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1657,7 +1662,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1680,6 +1686,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1690,7 +1698,22 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1712,7 +1735,20 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1792,9 +1828,18 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0X00
+#define I40E_AQ_25G_NOT_PRESENT 0X01
+#define I40E_AQ_25G_NVM_CRC_ERR 0X02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 external_power_ability;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index eb392d63cd03..128735975caa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -53,6 +53,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_KX_X722:
@@ -1183,6 +1185,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@@ -1197,6 +1201,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1204,6 +1209,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -1608,8 +1614,10 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
- if (report_init)
+ if (report_init) {
hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+ hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ }
return status;
}
@@ -1701,10 +1709,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@@ -4665,6 +4676,78 @@ phy_write_end:
}
/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes a value to the specified PHY register
+ **/
+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ i40e_status status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads the specified PHY register value
+ **/
+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ i40e_status status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
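A hypothetical call site for the read side (the page, register and PHY address values below are made up for illustration); the clause 22 versus clause 45 framing is picked from hw->device_id inside the helper:

    u16 val;
    i40e_status status;

    status = i40e_read_phy_register(hw, 0x0, 0x16, 0x0, &val);
    if (status)
            return status;
    /* val now holds the register contents */
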
+/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index b8a03a05c4e8..f1f41f12902f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -172,7 +172,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
f->macaddr, f->vlan,
i40e_filter_state_string[f->state]);
}
- dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n",
+ dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold,
(test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
"ON" : "OFF"));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index dd4457d29e98..8e46098bad57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -39,6 +39,8 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 76753e1ff6c2..cc1465aac2ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -265,8 +265,9 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
u32 *advertising)
{
- enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
+ u64 phy_types = pf->hw.phy.phy_types;
+
*supported = 0x0;
*advertising = 0x0;
@@ -369,6 +370,13 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
*advertising |= ADVERTISED_1000baseKX_Full;
}
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ }
}
/**
@@ -491,6 +499,14 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ADVERTISED_1000baseKX_Full |
ADVERTISED_Autoneg;
break;
+ case I40E_PHY_TYPE_25GBASE_KR:
+ case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_SR:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ ecmd->supported = SUPPORTED_Autoneg;
+ ecmd->advertising = ADVERTISED_Autoneg;
+ /* TODO: add speeds when ethtool is ready to support them */
+ break;
default:
/* if we got here and link is up something bad is afoot */
netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
@@ -512,6 +528,14 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_LINK_SPEED_40GB:
ethtool_cmd_speed_set(ecmd, SPEED_40000);
break;
+ case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+ ethtool_cmd_speed_set(ecmd, SPEED_25000);
+#else
+ netdev_info(netdev,
+ "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+ break;
case I40E_LINK_SPEED_20GB:
ethtool_cmd_speed_set(ecmd, SPEED_20000);
break;
@@ -1657,8 +1681,19 @@ static int i40e_get_ts_info(struct net_device *dev,
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
return 0;
}
@@ -1890,7 +1925,7 @@ static int i40e_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
pf->led_status = i40e_led_get(hw);
} else {
i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
@@ -1900,20 +1935,20 @@ static int i40e_set_phys_id(struct net_device *netdev,
}
return blink_freq;
case ETHTOOL_ID_ON:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0xf, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break;
case ETHTOOL_ID_OFF:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break;
case ETHTOOL_ID_INACTIVE:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
- i40e_led_set(hw, false, pf->led_status);
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
+ i40e_led_set(hw, pf->led_status, false);
} else {
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5777e49ee9e4..da4cbe32eb86 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 21
+#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -86,6 +86,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
/* required last entry */
{0, }
};
@@ -286,8 +288,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
void i40e_service_event_schedule(struct i40e_pf *pf)
{
if (!test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
- !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -1226,6 +1227,140 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
}
/**
+ * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
+ * @vsi: the VSI to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ *
+ * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
+ * behave as expected. If we have any active VLAN filters remaining or about
+ * to be added then we need to update non-VLAN filters to be marked as VLAN=0
+ * so that they only match against untagged traffic. If we no longer have any
+ * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
+ * so that they match against both tagged and untagged traffic. In this way,
+ * we ensure that we correctly receive the desired traffic. This ensures that
+ * when we have an active VLAN we will receive only untagged traffic and
+ * traffic matching active VLANs. If we have no active VLANs then we will
+ * operate in non-VLAN mode and receive all traffic, tagged or untagged.
+ *
+ * Finally, in a similar fashion, this function also corrects filters when
+ * there is an active PVID assigned to this VSI.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters)
+{
+ struct i40e_mac_filter *f, *add_head;
+ struct hlist_node *h;
+ int bkt, new_vlan;
+
+ /* To determine if a particular filter needs to be replaced we
+ * have the three following conditions:
+ *
+ * a) if we have a PVID assigned, then all filters which are
+ * not marked as VLAN=PVID must be replaced with filters that
+ * are.
+ * b) otherwise, if we have any active VLANS, all filters
+ * which are marked as VLAN=-1 must be replaced with
+ * filters marked as VLAN=0
+ * c) finally, if we do not have any active VLANS, all filters
+ * which are marked as VLAN=0 must be replaced with filters
+ * marked as VLAN=-1
+ */
+
+ /* Update the filters about to be added in place */
+ hlist_for_each_entry(f, tmp_add_list, hlist) {
+ if (vsi->info.pvid && f->vlan != vsi->info.pvid)
+ f->vlan = vsi->info.pvid;
+ else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
+ f->vlan = 0;
+ else if (!vlan_filters && f->vlan == 0)
+ f->vlan = I40E_VLAN_ANY;
+ }
+
+ /* Update the remaining active filters */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ /* Combine the checks for whether a filter needs to be changed
+ * and then determine the new VLAN inside the if block, in
+ * order to avoid duplicating code for adding the new filter
+ * then deleting the old filter.
+ */
+ if ((vsi->info.pvid && f->vlan != vsi->info.pvid) ||
+ (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!vlan_filters && f->vlan == 0)) {
+ /* Determine the new vlan we will be adding */
+ if (vsi->info.pvid)
+ new_vlan = vsi->info.pvid;
+ else if (vlan_filters)
+ new_vlan = 0;
+ else
+ new_vlan = I40E_VLAN_ANY;
+
+ /* Create the new filter */
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
+
+ /* Put the replacement filter into the add list */
+ hash_del(&add_head->hlist);
+ hlist_add_head(&add_head->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
+ }
+ }
+
+ vsi->has_vlan_filter = !!vlan_filters;
+
+ return 0;
+}
+
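Rules (a) through (c) above compress to a three-way choice. A minimal stand-alone restatement (a sketch, not driver code; I40E_VLAN_ANY is -1 as the comment notes):

    /* the replacement VLAN chosen by i40e_correct_mac_vlan_filters() */
    static int pick_new_vlan(int pvid, int vlan_filters)
    {
            if (pvid)
                    return pvid;    /* rule (a): PVID wins */
            if (vlan_filters)
                    return 0;       /* rule (b): untagged only */
            return -1;              /* rule (c): I40E_VLAN_ANY */
    }
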
+/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ *
+ * Remove whatever filter the firmware set up so the driver can manage
+ * its own filtering intelligently.
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ struct i40e_pf *pf = vsi->back;
+
+ /* Only appropriate for the PF main VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ /* Ignore error returns, some firmware does it this way... */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ /* ...and some firmware does it this way. */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+}
+
+/**
* i40e_add_filter - Add a mac/vlan filter to the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1723,7 +1858,6 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
* @count: Number of filters added
* @add_list: return data from fw
* @head: pointer to first filter in current batch
- * @aq_err: status from fw
*
* MAC filter entries from list were slated to be added to device. Returns
* number of successful filters. Note that 0 does NOT mean success!
@@ -1731,47 +1865,30 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
static int
i40e_update_filter_state(int count,
struct i40e_aqc_add_macvlan_element_data *add_list,
- struct i40e_mac_filter *add_head, int aq_err)
+ struct i40e_mac_filter *add_head)
{
int retval = 0;
int i;
-
- if (!aq_err) {
- retval = count;
- /* Everything's good, mark all filters active. */
- for (i = 0; i < count ; i++) {
- add_head->state = I40E_FILTER_ACTIVE;
- add_head = hlist_entry(add_head->hlist.next,
- typeof(struct i40e_mac_filter),
- hlist);
- }
- } else if (aq_err == I40E_AQ_RC_ENOSPC) {
- /* Device ran out of filter space. Check the return value
- * for each filter to see which ones are active.
+ for (i = 0; i < count; i++) {
+ /* Always check status of each filter. We don't need to check
+ * the firmware return status because we pre-set the filter
+ * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
+ * request to the adminq. Thus, if it no longer matches then
+ * we know the filter is active.
*/
- for (i = 0; i < count ; i++) {
- if (add_list[i].match_method ==
- I40E_AQC_MM_ERR_NO_RES) {
- add_head->state = I40E_FILTER_FAILED;
- } else {
- add_head->state = I40E_FILTER_ACTIVE;
- retval++;
- }
- add_head = hlist_entry(add_head->hlist.next,
- typeof(struct i40e_mac_filter),
- hlist);
- }
- } else {
- /* Some other horrible thing happened, fail all filters */
- retval = 0;
- for (i = 0; i < count ; i++) {
+ if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
add_head->state = I40E_FILTER_FAILED;
- add_head = hlist_entry(add_head->hlist.next,
- typeof(struct i40e_mac_filter),
- hlist);
+ } else {
+ add_head->state = I40E_FILTER_ACTIVE;
+ retval++;
}
+
+ add_head = hlist_entry(add_head->hlist.next,
+ typeof(struct i40e_mac_filter),
+ hlist);
}
+
return retval;
}
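The sentinel protocol this relies on, modeled stand-alone (the constants below are stand-ins for illustration, not the real adminq values; the actual pre-setting happens in i40e_sync_vsi_filters further down in this patch):

    #include <stdio.h>

    #define MM_ERR_NO_RES 0xffff  /* stands in for I40E_AQC_MM_ERR_NO_RES */
    #define MM_PERFECT    0x0001  /* a value firmware writes on success */

    int main(void)
    {
            unsigned short match_method[3] = {
                    MM_ERR_NO_RES, MM_ERR_NO_RES, MM_ERR_NO_RES /* pre-set */
            };
            int i, active = 0;

            /* pretend firmware programmed filters 0 and 2, overwriting their
             * match_method, but ran out of resources for filter 1 */
            match_method[0] = MM_PERFECT;
            match_method[2] = MM_PERFECT;

            for (i = 0; i < 3; i++)
                    if (match_method[i] != MM_ERR_NO_RES)
                            active++;

            printf("active filters: %d\n", active); /* prints 2 */
            return 0;
    }
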
@@ -1830,18 +1947,15 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add, bool *promisc_changed)
{
struct i40e_hw *hw = &vsi->back->hw;
- i40e_status aq_ret;
int aq_err, fcnt;
- aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
+ i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
aq_err = hw->aq.asq_last_status;
- fcnt = i40e_update_filter_state(num_add, list, add_head, aq_ret);
- vsi->active_filters += fcnt;
+ fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
*promisc_changed = true;
set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
- vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err),
@@ -1903,8 +2017,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
struct hlist_head tmp_add_list, tmp_del_list;
struct i40e_mac_filter *f, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
- unsigned int vlan_any_filters = 0;
- unsigned int non_vlan_filters = 0;
+ unsigned int failed_filters = 0;
unsigned int vlan_filters = 0;
bool promisc_changed = false;
char vsi_name[16] = "PF";
@@ -1951,7 +2064,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* Move the element into temporary del_list */
hash_del(&f->hlist);
hlist_add_head(&f->hlist, &tmp_del_list);
- vsi->active_filters--;
/* Avoid counting removed filters */
continue;
@@ -1961,67 +2073,21 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
hlist_add_head(&f->hlist, &tmp_add_list);
}
- /* Count the number of each type of filter we have
- * remaining, ignoring any filters we're about to
- * delete.
+ /* Count the number of active (current and new) VLAN
+ * filters we have now. Does not count filters which
+ * are marked for deletion.
*/
if (f->vlan > 0)
vlan_filters++;
- else if (!f->vlan)
- non_vlan_filters++;
- else
- vlan_any_filters++;
}
- /* We should never have VLAN=-1 filters at the same time as we
- * have either VLAN=0 or VLAN>0 filters, so warn about this
- * case here to help catch any issues.
- */
- WARN_ON(vlan_any_filters && (vlan_filters + non_vlan_filters));
-
- /* If we only have VLAN=0 filters remaining, and don't have
- * any other VLAN filters, we need to convert these VLAN=0
- * filters into VLAN=-1 (I40E_VLAN_ANY) so that we operate
- * correctly in non-VLAN mode and receive all traffic tagged
- * or untagged.
- */
- if (non_vlan_filters && !vlan_filters) {
- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
- hlist) {
- /* Only replace VLAN=0 filters */
- if (f->vlan)
- continue;
-
- /* Allocate a replacement element */
- add_head = kzalloc(sizeof(*add_head),
- GFP_KERNEL);
- if (!add_head)
- goto err_no_memory_locked;
+ retval = i40e_correct_mac_vlan_filters(vsi,
+ &tmp_add_list,
+ &tmp_del_list,
+ vlan_filters);
+ if (retval)
+ goto err_no_memory_locked;
- /* Copy the filter, with new state and VLAN */
- *add_head = *f;
- add_head->state = I40E_FILTER_NEW;
- add_head->vlan = I40E_VLAN_ANY;
-
- /* Move the replacement to the add list */
- INIT_HLIST_NODE(&add_head->hlist);
- hlist_add_head(&add_head->hlist,
- &tmp_add_list);
-
- /* Move the original to the delete list */
- f->state = I40E_FILTER_REMOVE;
- hash_del(&f->hlist);
- hlist_add_head(&f->hlist, &tmp_del_list);
- vsi->active_filters--;
- }
-
- /* Also update any filters on the tmp_add list */
- hlist_for_each_entry(f, &tmp_add_list, hlist) {
- if (!f->vlan)
- f->vlan = I40E_VLAN_ANY;
- }
- add_head = NULL;
- }
spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
@@ -2086,14 +2152,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list = NULL;
}
- /* After finishing notifying firmware of the deleted filters, update
- * the cached value of vsi->has_vlan_filter. Note that we are safe to
- * use just !!vlan_filters here because if we only have VLAN=0 (that
- * is, non_vlan_filters) these will all be converted to VLAN=-1 in the
- * logic above already so this value would still be correct.
- */
- vsi->has_vlan_filter = !!vlan_filters;
-
if (!hlist_empty(&tmp_add_list)) {
/* Do all the adds now. */
filter_list_len = hw->aq.asq_buf_size /
@@ -2137,6 +2195,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cpu_to_le16((u16)(f->vlan));
}
add_list[num_add].queue_number = 0;
+ /* set invalid match method for later detection */
+ add_list[num_add].match_method =
+ cpu_to_le16((u16)I40E_AQC_MM_ERR_NO_RES);
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
@@ -2169,27 +2230,36 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list = NULL;
}
- /* Check to see if we can drop out of overflow promiscuous mode. */
+ /* Determine the number of active and failed filters. */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->state == I40E_FILTER_ACTIVE)
+ vsi->active_filters++;
+ else if (f->state == I40E_FILTER_FAILED)
+ failed_filters++;
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* If promiscuous mode has changed, we need to calculate a new
+ * threshold for when we are safe to exit
+ */
+ if (promisc_changed)
+ vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
+
+ /* Check if we are able to exit overflow promiscuous mode. We can
+ * safely exit if we didn't just enter, we no longer have any failed
+ * filters, and we have reduced filters below the threshold value.
+ */
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ !promisc_changed && !failed_filters &&
(vsi->active_filters < vsi->promisc_threshold)) {
- int failed_count = 0;
- /* See if we have any failed filters. We can't drop out of
- * promiscuous until these have all been deleted.
- */
- spin_lock_bh(&vsi->mac_filter_hash_lock);
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->state == I40E_FILTER_FAILED)
- failed_count++;
- }
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- if (!failed_count) {
- dev_info(&pf->pdev->dev,
- "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
- vsi_name);
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
- promisc_changed = true;
- vsi->promisc_threshold = 0;
- }
+ dev_info(&pf->pdev->dev,
+ "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
+ vsi_name);
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ promisc_changed = true;
+ vsi->promisc_threshold = 0;
}
/* if the VF is not trusted do not do promisc */
@@ -2469,30 +2539,24 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
}
/**
- * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
* @vsi: the vsi being configured
* @vid: vlan id to be added (0 = untagged only , -1 = any)
+ *
+ * This is a helper function for adding a new MAC/VLAN filter with the
+ * specified VLAN for each existing MAC address already in the hash table.
+ * This function does *not* perform any accounting to update filters based on
+ * VLAN mode.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
**/
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
- struct i40e_mac_filter *f, *add_f, *del_f;
+ struct i40e_mac_filter *f, *add_f;
struct hlist_node *h;
int bkt;
- /* Locked once because all functions invoked below iterates list*/
- spin_lock_bh(&vsi->mac_filter_hash_lock);
-
- if (vsi->netdev) {
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add vlan filter %d for %pM\n",
- vid, vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- return -ENOMEM;
- }
- }
-
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE)
continue;
@@ -2501,54 +2565,28 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
- /* Now if we add a vlan tag, make sure to check if it is the first
- * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
- * with 0, so we now accept untagged and specified tagged traffic
- * (and not all tags along with untagged)
- */
- if (vid > 0 && vsi->netdev) {
- del_f = i40e_find_filter(vsi, vsi->netdev->dev_addr,
- I40E_VLAN_ANY);
- if (del_f) {
- __i40e_del_filter(vsi, del_f);
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- return -ENOMEM;
- }
- }
- }
+ return 0;
+}
- /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
- if (vid > 0 && !vsi->info.pvid) {
- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
- if (f->state == I40E_FILTER_REMOVE)
- continue;
- del_f = i40e_find_filter(vsi, f->macaddr,
- I40E_VLAN_ANY);
- if (!del_f)
- continue;
- __i40e_del_filter(vsi, del_f);
- add_f = i40e_add_filter(vsi, f->macaddr, 0);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- return -ENOMEM;
- }
- }
- }
+/**
+ * i40e_vsi_add_vlan - Add VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN id to be added (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ int err;
+ /* Locked once because all functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ err = i40e_add_vlan_all_mac(vsi, vid);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (err)
+ return err;
/* schedule our worker thread which will take care of
* applying the new filter changes
@@ -2558,28 +2596,39 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
/**
- * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MACs with the given VLAN
* @vsi: the vsi being configured
* @vid: vlan id to be removed (0 = untagged only , -1 = any)
- **/
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+ *
+ * This function should be used to remove all VLAN filters which match the
+ * given VID. It does not schedule the service event and does not take the
+ * mac_filter_hash_lock so it may be combined with other operations under
+ * a single invocation of the mac_filter_hash_lock.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
+ **/
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
- struct net_device *netdev = vsi->netdev;
struct i40e_mac_filter *f;
struct hlist_node *h;
int bkt;
- /* Locked once because all functions invoked below iterates list */
- spin_lock_bh(&vsi->mac_filter_hash_lock);
-
- if (vsi->netdev)
- i40e_del_filter(vsi, netdev->dev_addr, vid);
-
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->vlan == vid)
__i40e_del_filter(vsi, f);
}
+}
+
+/**
+ * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN id to be removed (0 = untagged only, -1 = any)
+ **/
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_rm_vlan_all_mac(vsi, vid);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of
@@ -5225,12 +5274,16 @@ out:
*/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
+ enum i40e_aq_link_speed new_speed;
char *speed = "Unknown";
char *fc = "Unknown";
- if (vsi->current_isup == isup)
+ new_speed = vsi->back->hw.phy.link_info.link_speed;
+
+ if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
return;
vsi->current_isup = isup;
+ vsi->current_speed = new_speed;
if (!isup) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
return;
@@ -5252,6 +5305,9 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_20GB:
speed = "20 G";
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
case I40E_LINK_SPEED_10GB:
speed = "10 G";
break;
@@ -5942,19 +5998,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_service_event_complete - Finish up the service event
- * @pf: board private structure
- **/
-static void i40e_service_event_complete(struct i40e_pf *pf)
-{
- WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
-
- /* flush memory to make sure state is correct before next watchog */
- smp_mb__before_atomic();
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
-}
-
-/**
* i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
* @pf: board private structure
**/
@@ -7263,10 +7306,12 @@ static void i40e_service_task(struct work_struct *work)
/* don't bother with service tasks if a reset is in progress */
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
- i40e_service_event_complete(pf);
return;
}
+ if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ return;
+
i40e_detect_recover_hung(pf);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
@@ -7279,7 +7324,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
- i40e_service_event_complete(pf);
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
/* If the tasks have taken longer than one timer cycle or there
* is more work to be done, reschedule the service task now
@@ -8696,7 +8743,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
I40E_FLAG_NO_PCI_LINK_CHECK |
I40E_FLAG_USE_SET_LLDP_MIB |
- I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ I40E_FLAG_GENEVE_OFFLOAD_CAPABLE |
+ I40E_FLAG_PTP_L4_CAPABLE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
((pf->hw.aq.api_maj_ver == 1) &&
(pf->hw.aq.api_min_ver > 4))) {
@@ -9291,6 +9339,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
+ /* The following steps are necessary to prevent reception
+ * of tagged packets - some older NVM configurations load a
+ * default MAC-VLAN filter that accepts any tagged packet,
+ * which must be replaced by a normal filter.
+ */
+ i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -9824,6 +9878,8 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
/* assign it some queues */
ret = i40e_alloc_rings(vsi);
@@ -11352,7 +11408,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -11364,8 +11419,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->main_vsi_seid);
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
- (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
- pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+ pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS;
if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER;
/* print a string summarizing features */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 37d67e792ad0..2551fc827444 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -373,6 +373,10 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5e2272c9e717..9e49ffafce28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -521,6 +521,8 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE))
+ return -ERANGE;
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
@@ -528,19 +530,26 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE))
+ return -ERANGE;
+ /* fall through */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
- I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
- I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
- config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2;
+ if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE) {
+ tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ } else {
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ }
break;
case HWTSTAMP_FILTER_ALL:
default:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 5544b509832f..352cf7cd2ef4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2027,7 +2027,7 @@ tx_only:
else
i40e_update_enable_itr(vsi, q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index de8550f4e3a4..e065321ce8ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -173,26 +173,37 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
-/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is
- * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
- * that 12K is not a power of 2 and division is expensive. It is used to
- * approximate the number of descriptors used per linear buffer. Note
- * that this will overestimate in some cases as it doesn't account for the
- * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
- * the error should not impact things much as large buffers usually mean
- * we will use fewer descriptors then there are frags in an skb.
+/**
+ * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment. For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since multiplication and division are commutative, we can reorder
+ * operations into:
+ * return ((size * 85) >> 20) + 1;
*/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
- const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
- const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
- unsigned int adjust = ~(u32)0;
-
- /* if we rounded up on the reciprocal pull down the adjustment */
- if ((max * reciprocal) > adjust)
- adjust = ~(u32)(reciprocal - 1);
-
- return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+ return ((size * 85) >> 20) + 1;
}
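As a quick check on the arithmetic above, a stand-alone sketch (plain userspace C, not driver code) comparing the approximation against exact round-up division by 12K. The deliberate underestimate just past each 12K boundary that the comment mentions shows up at size 12289:

    #include <stdio.h>

    #define ALIGNED_MAX 12288u  /* I40E_MAX_DATA_PER_TXD_ALIGNED */

    /* exact DIV_ROUND_UP(size, 12K) */
    static unsigned int exact(unsigned int size)
    {
            return (size + ALIGNED_MAX - 1) / ALIGNED_MAX;
    }

    /* the shift-and-multiply form used by i40e_txd_use_count() */
    static unsigned int approx(unsigned int size)
    {
            return ((size * 85) >> 20) + 1;
    }

    int main(void)
    {
            const unsigned int sizes[] = { 1, 4096, 12288, 12289,
                                           65536, 1u << 20 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("size %7u: exact %u approx %u\n",
                           sizes[i], exact(sizes[i]), approx(sizes[i]));
            return 0;
    }
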
/* Tx Descriptors needed, worst case */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index bd1ffae4054d..edc0abdf4783 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -213,47 +213,59 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
- BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- enum i40e_aq_capabilities_phy_type phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/* The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some PHY
+ * types. Bit 31 is unused in the I40E_CAP_PHY_TYPE_* bit field, but there
+ * is no corresponding gap in the i40e_aq_phy_type enumeration, so enum
+ * values of 31 and above must be shifted up by one bit position. The only
+ * affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
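A sketch of how these defines are meant to be consumed (the helper is hypothetical; only the names defined above are assumed):

static inline bool i40e_phy_type_in_caps(u64 phy_types,
					 enum i40e_aq_phy_type t)
{
	/* enum values of 31 and above must skip reserved bit 31 */
	if (t >= I40E_PHY_TYPE_25GBASE_KR)
		return phy_types & BIT_ULL(t + I40E_PHY_TYPE_OFFSET);
	return phy_types & BIT_ULL(t);
}

With this, i40e_phy_type_in_caps(phy->phy_types, I40E_PHY_TYPE_25GBASE_SR) tests bit 34, which is exactly I40E_CAP_PHY_TYPE_25GBASE_SR.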
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 05ed49b4b7c0..a6198b727e24 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2766,7 +2766,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- bool is_vsi_in_vlan = false;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
int ret = 0;
@@ -2803,11 +2802,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
+ /* Lock once here; several functions below iterate the MAC filter list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
- is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
+ if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
@@ -2830,14 +2828,23 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
*/
if ((!(vlan_id || qos) ||
vlanprio != le16_to_cpu(vsi->info.pvid)) &&
- vsi->info.pvid)
- ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+ vsi->info.pvid) {
+ ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_pvid;
+ }
+ }
if (vsi->info.pvid) {
- /* kill old VLAN */
- i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
- VLAN_VID_MASK));
+ /* remove all filters on the old VLAN */
+ i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
}
+
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
@@ -2847,24 +2854,30 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
- /* add new VLAN filter */
- ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ /* add new VLAN filter for each MAC */
+ ret = i40e_add_vlan_all_mac(vsi, vlan_id);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"add VF VLAN failed, ret=%d aq_err=%d\n", ret,
vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_pvid;
}
- /* Kill non-vlan MAC filters - ignore error return since
- * there might not be any non-vlan MAC filters.
- */
- i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
+
+ /* remove the previously added non-VLAN MAC filters */
+ i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
}
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* Schedule the worker thread to take care of applying changes */
+ i40e_service_event_schedule(vsi->back);
+
if (ret) {
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+
/* The Port VLAN needs to be saved across resets the same as the
* default LAN MAC address.
*/
@@ -2921,6 +2934,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
case I40E_LINK_SPEED_40GB:
speed = 40000;
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = 25000;
+ break;
case I40E_LINK_SPEED_20GB:
speed = 20000;
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 40b0eafd0c71..eeb9864bc5b1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1639,6 +1639,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1647,6 +1651,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1654,7 +1659,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1677,6 +1683,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1687,7 +1695,22 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1709,7 +1732,20 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1789,9 +1825,18 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0x00
+#define I40E_AQ_25G_NOT_PRESENT 0x01
+#define I40E_AQ_25G_NVM_CRC_ERR 0x02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0x03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0x04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0x05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 external_power_ability;
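A small hypothetical decode of the new fec_cfg_curr_mod_ext_info byte, assuming only the masks defined above:

static u8 i40e_module_type_ext(u8 fec_cfg_curr_mod_ext_info)
{
	/* top three bits carry the extended module type */
	return (fec_cfg_curr_mod_ext_info & I40E_AQ_MODULE_TYPE_EXT_MASK) >>
	       I40E_AQ_MODULE_TYPE_EXT_SHIFT;
}

static bool i40e_fec_rs_active(u8 fec_cfg_curr_mod_ext_info)
{
	/* RS (Reed-Solomon) FEC currently enabled on the link */
	return fec_cfg_curr_mod_ext_info & I40E_AQ_ENABLE_FEC_RS;
}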
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7953c13451b9..aa63b7fb993d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -53,6 +53,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_SFP_X722:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 70235706915e..21dcaee1ad1d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -39,6 +39,8 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index d89d52109efa..ba6c6bda0e22 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -115,6 +115,10 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index c4b174afd253..df67ef37b7f3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1490,7 +1490,7 @@ tx_only:
else
i40e_update_enable_itr(vsi, q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
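The return-value fix above enforces the NAPI polling contract; a generic sketch of the rule (helper names hypothetical):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rings(napi, budget);	/* hypothetical */

	if (example_more_work(napi))				/* hypothetical */
		return budget;		/* stay in polling mode */

	napi_complete(napi);
	example_irq_enable(napi);				/* hypothetical */
	/* after napi_complete() the handler must not return the full
	 * budget, or the core may poll a completed context; clamp
	 */
	return min(work_done, budget - 1);
}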
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index a586e19cfd1d..a5fc789f78eb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -173,26 +173,37 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
-/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is
- * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
- * that 12K is not a power of 2 and division is expensive. It is used to
- * approximate the number of descriptors used per linear buffer. Note
- * that this will overestimate in some cases as it doesn't account for the
- * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
- * the error should not impact things much as large buffers usually mean
- * we will use fewer descriptors then there are frags in an skb.
+/**
+ * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 85/256 is slightly less than
+ * 1/3, we'll underestimate just above each multiple of 12K. This is
+ * actually more accurate, as we have 4K - 1 of wiggle room that we can
+ * fit into the last segment. For our purposes this is accurate out to
+ * 1M, which is orders of magnitude greater than our largest possible
+ * GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since the multiply and the shifts can be reordered without changing
+ * the result in our range of interest, this simplifies to:
+ * return ((size * 85) >> 20) + 1;
*/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
- const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
- const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
- unsigned int adjust = ~(u32)0;
-
- /* if we rounded up on the reciprocal pull down the adjustment */
- if ((max * reciprocal) > adjust)
- adjust = ~(u32)(reciprocal - 1);
-
- return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+ return ((size * 85) >> 20) + 1;
}
/* Tx Descriptors needed, worst case */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 515484c2b82a..c85e8a31c072 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -187,47 +187,59 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
- BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- enum i40e_aq_capabilities_phy_type phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/* The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some PHY
+ * types. Bit 31 is unused in the I40E_CAP_PHY_TYPE_* bit field, but there
+ * is no corresponding gap in the i40e_aq_phy_type enumeration, so enum
+ * values of 31 and above must be shifted up by one bit position. The only
+ * affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index a9940154eead..272d600c1ed0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -85,6 +85,14 @@ static int i40evf_get_settings(struct net_device *netdev,
case I40E_LINK_SPEED_40GB:
ethtool_cmd_speed_set(ecmd, SPEED_40000);
break;
+ case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+ ethtool_cmd_speed_set(ecmd, SPEED_25000);
+#else
+ netdev_info(netdev,
+ "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+ break;
case I40E_LINK_SPEED_20GB:
ethtool_cmd_speed_set(ecmd, SPEED_20000);
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index ca850212521e..c0fc53361800 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 21
+#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index ddf478d6322b..2059a8e88908 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -836,6 +836,9 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
case I40E_LINK_SPEED_40GB:
speed = "40 G";
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
case I40E_LINK_SPEED_20GB:
speed = "20 G";
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index b0448b55fbc7..3797cc7c1288 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -260,11 +260,6 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
pr_debug("\n");
}
-enum {
- MLX5_DRIVER_STATUS_ABORTED = 0xfe,
- MLX5_DRIVER_SYND = 0xbadd00de,
-};
-
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
u32 *synd, u8 *status)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 63dd6390b161..951dbd58594d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -262,7 +262,7 @@ struct mlx5e_tstamp {
};
enum {
- MLX5E_RQ_STATE_FLUSH,
+ MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
MLX5E_RQ_STATE_AM,
};
@@ -415,7 +415,7 @@ struct mlx5e_sq_dma {
};
enum {
- MLX5E_SQ_STATE_FLUSH,
+ MLX5E_SQ_STATE_ENABLED,
MLX5E_SQ_STATE_BF_ENABLE,
};
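Inverting FLUSH into ENABLED changes the default state: a freshly created queue is rejected by the datapath until open explicitly marks it ready, instead of relying on teardown to remember setting a flag. The lifecycle pattern used throughout the hunks below, in miniature (a sketch, not additional driver code):

/* open:  create the HW queue, mark it usable, then activate it */
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);

/* datapath: bail out unless the queue was explicitly enabled */
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
	return false;

/* close: clear ENABLED first, then napi_synchronize() and drain */
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);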
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9def5cc378a3..07020276fe73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -831,6 +831,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_disable_rq;
@@ -845,6 +846,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
return 0;
err_disable_rq:
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
mlx5e_disable_rq(rq);
err_destroy_rq:
mlx5e_destroy_rq(rq);
@@ -854,7 +856,7 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
cancel_work_sync(&rq->am.work);
@@ -1078,7 +1080,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
- MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index);
@@ -1155,6 +1156,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
if (err)
goto err_destroy_sq;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
false, 0);
if (err)
@@ -1168,6 +1170,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
return 0;
err_disable_sq:
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
mlx5e_disable_sq(sq);
err_destroy_sq:
mlx5e_destroy_sq(sq);
@@ -1184,7 +1187,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
/* prevent netif_tx_wake_queue */
napi_synchronize(&sq->channel->napi);
@@ -3163,7 +3166,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
- set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
@@ -3227,13 +3230,13 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
for (i = 0; i < priv->params.num_channels; i++) {
struct mlx5e_channel *c = priv->channel[i];
- set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
napi_synchronize(&c->napi);
/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
old_prog = xchg(&c->rq.xdp_prog, prog);
- clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
/* napi_schedule in case we have missed anything */
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
napi_schedule(&c->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 42cd687e6608..0e2fb3ed1790 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -340,7 +340,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq->db.ico_wqe[pi].num_wqebbs = 1;
- mlx5e_send_nop(sq, true);
+ mlx5e_send_nop(sq, false);
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -412,7 +412,7 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
return;
}
@@ -445,7 +445,7 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
}
#define RQ_CANNOT_POST(rq) \
- (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+ (!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@@ -928,7 +928,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
int work_done = 0;
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
if (cq->decmprs_left)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 70a717382357..cfb68371c397 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -409,7 +409,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
- if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
npkts = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 5703f19a6a24..e5c12a732aa1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -56,7 +56,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
struct mlx5_cqe64 *cqe;
u16 sqcc;
- if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
cqe = mlx5e_get_cqe(cq);
@@ -113,7 +113,7 @@ static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
sq = container_of(cq, struct mlx5e_sq, cq);
- if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2dc28695196c..7b4c339a8a9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,13 +62,13 @@ MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
-int mlx5_core_debug_mask;
-module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
+unsigned int mlx5_core_debug_mask;
+module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define MLX5_DEFAULT_PROF 2
-static int prof_sel = MLX5_DEFAULT_PROF;
-module_param_named(prof_sel, prof_sel, int, 0444);
+static unsigned int prof_sel = MLX5_DEFAULT_PROF;
+module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
enum {
@@ -767,13 +767,15 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
u8 status;
mlx5_cmd_mbox_status(query_out, &status, &syndrome);
- if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
- pr_debug("Only ISSI 0 is supported\n");
- return 0;
+ if (!status || syndrome == MLX5_DRIVER_SYND) {
+ mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
+ err, status, syndrome);
+ return err;
}
- pr_err("failed to query ISSI err(%d)\n", err);
- return err;
+ mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
+ dev->issi = 0;
+ return 0;
}
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
@@ -787,7 +789,8 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
set_out, sizeof(set_out));
if (err) {
- pr_err("failed to set ISSI=1 err(%d)\n", err);
+ mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
+ err);
return err;
}
@@ -1266,13 +1269,6 @@ static int init_one(struct pci_dev *pdev,
dev->pdev = pdev;
dev->event = mlx5_core_event;
-
- if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
- mlx5_core_warn(dev,
- "selected profile out of range, selecting default (%d)\n",
- MLX5_DEFAULT_PROF);
- prof_sel = MLX5_DEFAULT_PROF;
- }
dev->profile = &profile[prof_sel];
INIT_LIST_HEAD(&priv->ctx_list);
@@ -1490,10 +1486,22 @@ static struct pci_driver mlx5_core_driver = {
.sriov_configure = mlx5_core_sriov_configure,
};
+static void mlx5_core_verify_params(void)
+{
+ if (prof_sel >= ARRAY_SIZE(profile)) {
+ pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %u, valid range 0-%zu, falling back to default(%d)\n",
+ prof_sel,
+ ARRAY_SIZE(profile) - 1,
+ MLX5_DEFAULT_PROF);
+ prof_sel = MLX5_DEFAULT_PROF;
+ }
+}
+
static int __init init(void)
{
int err;
+ mlx5_core_verify_params();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 7e635ebda199..e0a8fbdd1446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -44,11 +44,11 @@
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
-extern int mlx5_core_debug_mask;
+extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
- dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
- (__dev)->priv.name, __func__, __LINE__, current->pid, \
+ dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
@@ -63,8 +63,8 @@ do { \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
- dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
- (__dev)->priv.name, __func__, __LINE__, current->pid, \
+ dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_info(__dev, format, ...) \
@@ -75,6 +75,11 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
+enum {
+ MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+ MLX5_DRIVER_SYND = 0xbadd00de,
+};
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index de4e2a240d88..8e5cb7605b0f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2229,6 +2229,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
mapping))) {
DP_NOTICE(cdev,
"Unable to map frag - dropping packet\n");
+ rc = -ENOMEM;
goto err;
}
} else {
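The qed fix above addresses a common error-path bug class: rc still holds the previous call's success value when a later failure jumps to the unwind label, so the caller sees 0. In miniature (all helpers hypothetical):

static int example_xmit(void)
{
	int rc = example_map_header();	/* hypothetical, 0 on success */

	if (rc)
		goto err;
	if (!example_map_frag()) {	/* hypothetical */
		rc = -ENOMEM;		/* without this line we return 0 */
		goto err;
	}
	return 0;
err:
	example_unmap();		/* hypothetical */
	return rc;
}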
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2830190aaace..f9b97f5946f8 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2344,6 +2344,13 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
+static int rtl8169_nway_reset(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return mii_nway_restart(&tp->mii);
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
@@ -2359,6 +2366,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
+ .nway_reset = rtl8169_nway_reset,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 990746955216..f35385266fbf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -43,9 +43,11 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
+ value &= ~DMA_AXI_WR_OSR_LMT;
value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
DMA_AXI_WR_OSR_LMT_SHIFT;
+ value &= ~DMA_AXI_RD_OSR_LMT;
value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
DMA_AXI_RD_OSR_LMT_SHIFT;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 577316de6ba8..e81b6e565c29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -30,9 +30,11 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
+ value &= ~DMA_AXI_WR_OSR_LMT;
value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
DMA_AXI_WR_OSR_LMT_SHIFT;
+ value &= ~DMA_AXI_RD_OSR_LMT;
value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
DMA_AXI_RD_OSR_LMT_SHIFT;
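Both stmmac hunks above apply the same read-modify-write rule: clear a multi-bit field before OR-ing in the new value, or bits left over from the register's reset value survive. A generic sketch (register and field names hypothetical):

u32 value = readl(ioaddr + EXAMPLE_AXI_REG);

value &= ~EXAMPLE_FIELD_MASK;	/* clear the whole field first */
value |= (new_lmt << EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK;
writel(value, ioaddr + EXAMPLE_AXI_REG);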
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d5a8122b6033..6ab7e2bdcadd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -406,9 +406,7 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
return 0;
}
- spin_lock(&priv->lock);
rc = phy_ethtool_ksettings_set(phy, cmd);
- spin_unlock(&priv->lock);
return rc;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 98bf86d64d96..e528e7126b65 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -126,8 +126,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
- of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
- of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
+ if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
+ axi->axi_wr_osr_lmt = 1;
+ if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
+ axi->axi_rd_osr_lmt = 1;
of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
of_node_put(np);
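of_property_read_u32() leaves the output untouched when the property is absent, which is why the fix above has to check its return value. An equivalent pattern pre-loads the default (a sketch):

u32 wr_osr_lmt = 1;	/* default when the property is absent */

of_property_read_u32(np, "snps,wr_osr_lmt", &wr_osr_lmt);
axi->axi_wr_osr_lmt = wr_osr_lmt;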
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 9904d740d528..ff7f518a0702 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -74,7 +74,7 @@ config TI_CPSW
will be called cpsw.
config TI_CPTS
- bool "TI Common Platform Time Sync (CPTS) Support"
+ tristate "TI Common Platform Time Sync (CPTS) Support"
depends on TI_CPSW
select PTP_1588_CLOCK
---help---
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index d420d9413e4a..1e7c10bf8713 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -12,8 +12,9 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
+obj-$(CONFIG_TI_CPTS) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw.o cpts.o
+ti_cpsw-y := cpsw.o
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3f96c57f3580..b62d958c5fae 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -753,6 +753,82 @@ requeue:
dev_kfree_skb_any(new_skb);
}
+/* split budget depending on channel rates */
+static void cpsw_split_budget(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_vector *txv = cpsw->txv;
+ u32 consumed_rate, bigest_rate = 0;
+ int budget, bigest_rate_ch = 0;
+ struct cpsw_slave *slave;
+ int i, rlim_ch_num = 0;
+ u32 ch_rate, max_rate;
+ int ch_budget = 0;
+
+ if (cpsw->data.dual_emac)
+ slave = &cpsw->slaves[priv->emac_port];
+ else
+ slave = &cpsw->slaves[cpsw->data.active_slave];
+
+ max_rate = slave->phy->speed * 1000;
+
+ consumed_rate = 0;
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (!ch_rate)
+ continue;
+
+ rlim_ch_num++;
+ consumed_rate += ch_rate;
+ }
+
+ if (cpsw->tx_ch_num == rlim_ch_num) {
+ max_rate = consumed_rate;
+ } else {
+ ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
+ ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ bigest_rate = (max_rate - consumed_rate) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ }
+
+ /* split tx budget */
+ budget = CPSW_POLL_WEIGHT;
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (ch_rate) {
+ txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+ if (!txv[i].budget)
+ txv[i].budget = 1;
+ if (ch_rate > bigest_rate) {
+ bigest_rate_ch = i;
+ bigest_rate = ch_rate;
+ }
+ } else {
+ txv[i].budget = ch_budget;
+ if (!bigest_rate_ch)
+ bigest_rate_ch = i;
+ }
+
+ budget -= txv[i].budget;
+ }
+
+ if (budget)
+ txv[bigest_rate_ch].budget += budget;
+
+ /* split rx budget */
+ budget = CPSW_POLL_WEIGHT;
+ ch_budget = budget / cpsw->rx_ch_num;
+ for (i = 0; i < cpsw->rx_ch_num; i++) {
+ cpsw->rxv[i].budget = ch_budget;
+ budget -= ch_budget;
+ }
+
+ if (budget)
+ cpsw->rxv[0].budget += budget;
+}
+
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
struct cpsw_common *cpsw = dev_id;
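A worked pass through cpsw_split_budget(), with assumed numbers (CPSW_POLL_WEIGHT of 64, a 1 Gb/s link so max_rate = 1,000,000 kbit/s, three tx channels of which only ch0 is shaped to 300,000 kbit/s): rlim_ch_num = 1 and consumed_rate = 300,000, so each unshaped channel gets ch_budget = (64 - (300,000 * 64) / 1,000,000) / 2 = (64 - 19) / 2 = 22, ch0 gets (300,000 * 64) / 1,000,000 = 19, and the leftover 64 - 19 - 22 - 22 = 1 lands on the first unshaped channel. The rx budget is simply 64 / rx_ch_num per channel, with any remainder added to channel 0.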
@@ -941,6 +1017,7 @@ static void cpsw_adjust_link(struct net_device *ndev)
for_each_slave(priv, _cpsw_adjust_link, priv, &link);
if (link) {
+ cpsw_split_budget(priv->ndev);
netif_carrier_on(ndev);
if (netif_running(ndev))
netif_tx_wake_all_queues(ndev);
@@ -1280,82 +1357,6 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
}
}
-/* split budget depending on channel rates */
-static void cpsw_split_budget(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_vector *txv = cpsw->txv;
- u32 consumed_rate, bigest_rate = 0;
- int budget, bigest_rate_ch = 0;
- struct cpsw_slave *slave;
- int i, rlim_ch_num = 0;
- u32 ch_rate, max_rate;
- int ch_budget = 0;
-
- if (cpsw->data.dual_emac)
- slave = &cpsw->slaves[priv->emac_port];
- else
- slave = &cpsw->slaves[cpsw->data.active_slave];
-
- max_rate = slave->phy->speed * 1000;
-
- consumed_rate = 0;
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (!ch_rate)
- continue;
-
- rlim_ch_num++;
- consumed_rate += ch_rate;
- }
-
- if (cpsw->tx_ch_num == rlim_ch_num) {
- max_rate = consumed_rate;
- } else {
- ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
- ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
- (cpsw->tx_ch_num - rlim_ch_num);
- bigest_rate = (max_rate - consumed_rate) /
- (cpsw->tx_ch_num - rlim_ch_num);
- }
-
- /* split tx budget */
- budget = CPSW_POLL_WEIGHT;
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (ch_rate) {
- txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
- if (!txv[i].budget)
- txv[i].budget = 1;
- if (ch_rate > bigest_rate) {
- bigest_rate_ch = i;
- bigest_rate = ch_rate;
- }
- } else {
- txv[i].budget = ch_budget;
- if (!bigest_rate_ch)
- bigest_rate_ch = i;
- }
-
- budget -= txv[i].budget;
- }
-
- if (budget)
- txv[bigest_rate_ch].budget += budget;
-
- /* split rx budget */
- budget = CPSW_POLL_WEIGHT;
- ch_budget = budget / cpsw->rx_ch_num;
- for (i = 0; i < cpsw->rx_ch_num; i++) {
- cpsw->rxv[i].budget = ch_budget;
- budget -= ch_budget;
- }
-
- if (budget)
- cpsw->rxv[0].budget += budget;
-}
-
static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
struct cpsw_common *cpsw = priv->cpsw;
@@ -1486,9 +1487,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->dev, cpsw->cpts,
- cpsw->data.cpts_clock_mult,
- cpsw->data.cpts_clock_shift))
+ if (cpts_register(cpsw->cpts))
dev_err(priv->dev, "error registering cpts device\n");
}
@@ -1501,7 +1500,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_set_coalesce(ndev, &coal);
}
- cpsw_split_budget(ndev);
cpdma_ctlr_start(cpsw->dma);
cpsw_intr_enable(cpsw);
@@ -1562,7 +1560,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpsw->cpts->tx_enable)
+ cpts_is_tx_enabled(cpsw->cpts))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
@@ -1594,14 +1592,15 @@ fail:
return NETDEV_TX_BUSY;
}
-#ifdef CONFIG_TI_CPTS
+#if IS_ENABLED(CONFIG_TI_CPTS)
static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
{
struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!cpsw->cpts->tx_enable && !cpsw->cpts->rx_enable) {
+ if (!cpts_is_tx_enabled(cpsw->cpts) &&
+ !cpts_is_rx_enabled(cpsw->cpts)) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -1609,10 +1608,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (cpsw->cpts->tx_enable)
+ if (cpts_is_tx_enabled(cpsw->cpts))
ts_en |= CPSW_V1_TS_TX_EN;
- if (cpsw->cpts->rx_enable)
+ if (cpts_is_rx_enabled(cpsw->cpts))
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -1635,20 +1634,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (cpsw->cpts->tx_enable)
+ if (cpts_is_tx_enabled(cpsw->cpts))
ctrl |= CTRL_V2_TX_TS_BITS;
- if (cpsw->cpts->rx_enable)
+ if (cpts_is_rx_enabled(cpsw->cpts))
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (cpsw->cpts->tx_enable)
+ if (cpts_is_tx_enabled(cpsw->cpts))
ctrl |= CTRL_V3_TX_TS_BITS;
- if (cpsw->cpts->rx_enable)
+ if (cpts_is_rx_enabled(cpsw->cpts))
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -1684,7 +1683,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts->rx_enable = 0;
+ cpts_rx_enable(cpts, 0);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -1700,14 +1699,14 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts->rx_enable = 1;
+ cpts_rx_enable(cpts, 1);
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;
+ cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -1736,13 +1735,23 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts->rx_enable ?
+ cfg.tx_type = cpts_is_tx_enabled(cpts) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+#else
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_TI_CPTS */
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
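The #ifdef-to-IS_ENABLED conversion matters because TI_CPTS is now tristate: with CONFIG_TI_CPTS=m the preprocessor defines CONFIG_TI_CPTS_MODULE rather than CONFIG_TI_CPTS, so a bare #ifdef would wrongly select the -EOPNOTSUPP stubs in a modular build. In short:

#ifdef CONFIG_TI_CPTS		/* true only for =y */
#endif
#if IS_ENABLED(CONFIG_TI_CPTS)	/* true for =y and =m */
#endif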
@@ -1755,12 +1764,10 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
return -EINVAL;
switch (cmd) {
-#ifdef CONFIG_TI_CPTS
case SIOCSHWTSTAMP:
return cpsw_hwtstamp_set(dev, req);
case SIOCGHWTSTAMP:
return cpsw_hwtstamp_get(dev, req);
-#endif
}
if (!cpsw->slaves[slave_no].phy)
@@ -2100,10 +2107,10 @@ static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+#if IS_ENABLED(CONFIG_TI_CPTS)
static int cpsw_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
-#ifdef CONFIG_TI_CPTS
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
info->so_timestamping =
@@ -2120,7 +2127,12 @@ static int cpsw_get_ts_info(struct net_device *ndev,
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ return 0;
+}
#else
+static int cpsw_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
@@ -2128,9 +2140,9 @@ static int cpsw_get_ts_info(struct net_device *ndev,
info->phc_index = -1;
info->tx_types = 0;
info->rx_filters = 0;
-#endif
return 0;
}
+#endif
static int cpsw_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *ecmd)
@@ -2512,18 +2524,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->active_slave = prop;
- if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
- dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
- return -EINVAL;
- }
- data->cpts_clock_mult = prop;
-
- if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
- dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
- return -EINVAL;
- }
- data->cpts_clock_shift = prop;
-
data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
* sizeof(struct cpsw_slave_data),
GFP_KERNEL);
@@ -2782,6 +2782,7 @@ static int cpsw_probe(struct platform_device *pdev)
struct cpdma_params dma_params;
struct cpsw_ale_params ale_params;
void __iomem *ss_regs;
+ void __iomem *cpts_regs;
struct resource *res, *ss_res;
const struct of_device_id *of_id;
struct gpio_descs *mode;
@@ -2809,12 +2810,6 @@ static int cpsw_probe(struct platform_device *pdev)
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
cpsw->rx_packet_max = max(rx_packet_max, 128);
- cpsw->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
- if (!cpsw->cpts) {
- dev_err(&pdev->dev, "error allocating cpts\n");
- ret = -ENOMEM;
- goto clean_ndev_ret;
- }
mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
if (IS_ERR(mode)) {
@@ -2902,7 +2897,7 @@ static int cpsw_probe(struct platform_device *pdev)
switch (cpsw->version) {
case CPSW_VERSION_1:
cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- cpsw->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
@@ -2916,7 +2911,7 @@ static int cpsw_probe(struct platform_device *pdev)
case CPSW_VERSION_3:
case CPSW_VERSION_4:
cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- cpsw->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
@@ -2983,6 +2978,12 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+ if (IS_ERR(cpsw->cpts)) {
+ ret = PTR_ERR(cpsw->cpts);
+ goto clean_ale_ret;
+ }
+
ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
@@ -3098,6 +3099,7 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(cpsw->slaves[1].ndev);
unregister_netdev(ndev);
+ cpts_release(cpsw->cpts);
cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
cpsw_remove_dt(pdev);
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 16b54c6f32c2..6c3037aa2cd3 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -31,8 +31,6 @@ struct cpsw_platform_data {
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
- u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
u32 mac_control; /* Mac control register */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 85a55b4ff8c0..0c0d48e5bea4 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,10 +31,8 @@
#include "cpts.h"
-#ifdef CONFIG_TI_CPTS
-
-#define cpts_read32(c, r) __raw_readl(&c->reg->r)
-#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
+#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
+#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
static int event_expired(struct cpts_event *event)
{
@@ -59,6 +57,26 @@ static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
return -1;
}
+static int cpts_purge_events(struct cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_expired(event)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ pr_debug("cpts: event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
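The del/add pair in cpts_purge_events() can also be written with list_move() and the typed iterator; an equivalent sketch:

	struct cpts_event *event, *next;

	list_for_each_entry_safe(event, next, &cpts->events, list) {
		if (event_expired(event)) {
			list_move(&event->list, &cpts->pool);
			++removed;
		}
	}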
/*
* Returns zero if matching event type was found.
*/
@@ -71,10 +89,12 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
if (cpts_fifo_pop(cpts, &hi, &lo))
break;
- if (list_empty(&cpts->pool)) {
- pr_err("cpts: event pool is empty\n");
+
+ if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
+ pr_err("cpts: event pool empty\n");
return -1;
}
+
event = list_first_entry(&cpts->pool, struct cpts_event, list);
event->tmo = jiffies + 2;
event->high = hi;
@@ -223,27 +243,9 @@ static void cpts_overflow_check(struct work_struct *work)
struct timespec64 ts;
struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
- cpts_write32(cpts, CPTS_EN, control);
- cpts_write32(cpts, TS_PEND_EN, int_enable);
cpts_ptp_gettime(&cpts->info, &ts);
pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
- schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
-}
-
-static void cpts_clk_init(struct device *dev, struct cpts *cpts)
-{
- cpts->refclk = devm_clk_get(dev, "cpts");
- if (IS_ERR(cpts->refclk)) {
- dev_err(dev, "Failed to get cpts refclk\n");
- cpts->refclk = NULL;
- return;
- }
- clk_prepare_enable(cpts->refclk);
-}
-
-static void cpts_clk_release(struct cpts *cpts)
-{
- clk_disable(cpts->refclk);
+ schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
}
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -334,6 +336,7 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
memset(ssh, 0, sizeof(*ssh));
ssh->hwtstamp = ns_to_ktime(ns);
}
+EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -349,60 +352,170 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
ssh.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &ssh);
}
+EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
-#endif /*CONFIG_TI_CPTS*/
-
-int cpts_register(struct device *dev, struct cpts *cpts,
- u32 mult, u32 shift)
+int cpts_register(struct cpts *cpts)
{
-#ifdef CONFIG_TI_CPTS
int err, i;
- unsigned long flags;
-
- cpts->info = cpts_info;
- cpts->clock = ptp_clock_register(&cpts->info, dev);
- if (IS_ERR(cpts->clock)) {
- err = PTR_ERR(cpts->clock);
- cpts->clock = NULL;
- return err;
- }
- spin_lock_init(&cpts->lock);
-
- cpts->cc.read = cpts_systim_read;
- cpts->cc.mask = CLOCKSOURCE_MASK(32);
- cpts->cc_mult = mult;
- cpts->cc.mult = mult;
- cpts->cc.shift = shift;
INIT_LIST_HEAD(&cpts->events);
INIT_LIST_HEAD(&cpts->pool);
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- cpts_clk_init(dev, cpts);
+ clk_enable(cpts->refclk);
+
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
- spin_lock_irqsave(&cpts->lock, flags);
timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
- spin_unlock_irqrestore(&cpts->lock, flags);
-
- INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
- schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+ cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
+ if (IS_ERR(cpts->clock)) {
+ err = PTR_ERR(cpts->clock);
+ cpts->clock = NULL;
+ goto err_ptp;
+ }
cpts->phc_index = ptp_clock_index(cpts->clock);
-#endif
+
+ schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
return 0;
+
+err_ptp:
+ clk_disable(cpts->refclk);
+ return err;
}
+EXPORT_SYMBOL_GPL(cpts_register);
void cpts_unregister(struct cpts *cpts)
{
-#ifdef CONFIG_TI_CPTS
- if (cpts->clock) {
- ptp_clock_unregister(cpts->clock);
- cancel_delayed_work_sync(&cpts->overflow_work);
+ if (WARN_ON(!cpts->clock))
+ return;
+
+ cancel_delayed_work_sync(&cpts->overflow_work);
+
+ ptp_clock_unregister(cpts->clock);
+ cpts->clock = NULL;
+
+ cpts_write32(cpts, 0, int_enable);
+ cpts_write32(cpts, 0, control);
+
+ clk_disable(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_unregister);
+
+static void cpts_calc_mult_shift(struct cpts *cpts)
+{
+ u64 frac, maxsec, ns;
+ u32 freq;
+
+ freq = clk_get_rate(cpts->refclk);
+
+ /* Calc the maximum number of seconds which we can run before
+ * wrapping around.
+ */
+ maxsec = cpts->cc.mask;
+ do_div(maxsec, freq);
+ /* limit conversion time to 10 sec as higher values will produce
+ * too small mult factors and so reduce the conversion accuracy
+ */
+ if (maxsec > 10)
+ maxsec = 10;
+
+ /* Calc overflow check period (maxsec / 2) */
+ cpts->ov_check_period = (HZ * maxsec) / 2;
+ dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
+ cpts->ov_check_period);
+
+ if (cpts->cc.mult || cpts->cc.shift)
+ return;
+
+ clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
+ freq, NSEC_PER_SEC, maxsec);
+
+ frac = 0;
+ ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);
+
+ dev_info(cpts->dev,
+ "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
+ freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
+}
+
+static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
+{
+ int ret = -EINVAL;
+ u32 prop;
+
+ if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
+ cpts->cc.mult = prop;
+
+ if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
+ cpts->cc.shift = prop;
+
+ if ((cpts->cc.mult && !cpts->cc.shift) ||
+ (!cpts->cc.mult && cpts->cc.shift))
+ goto of_error;
+
+ return 0;
+
+of_error:
+ dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
+ return ret;
+}
+
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ struct cpts *cpts;
+ int ret;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct cpsw_cpts __iomem *)regs;
+ spin_lock_init(&cpts->lock);
+ INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
+
+ ret = cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ cpts->refclk = devm_clk_get(dev, "cpts");
+ if (IS_ERR(cpts->refclk)) {
+ dev_err(dev, "Failed to get cpts refclk\n");
+ return ERR_PTR(PTR_ERR(cpts->refclk));
}
- if (cpts->refclk)
- cpts_clk_release(cpts);
-#endif
+
+ clk_prepare(cpts->refclk);
+
+ cpts->cc.read = cpts_systim_read;
+ cpts->cc.mask = CLOCKSOURCE_MASK(32);
+ cpts->info = cpts_info;
+
+ cpts_calc_mult_shift(cpts);
+ /* save cc.mult original value as it can be modified
+ * by cpts_ptp_adjfreq().
+ */
+ cpts->cc_mult = cpts->cc.mult;
+
+ return cpts;
}
+EXPORT_SYMBOL_GPL(cpts_create);
+
+void cpts_release(struct cpts *cpts)
+{
+ if (!cpts)
+ return;
+
+ if (WARN_ON(!cpts->refclk))
+ return;
+
+ clk_unprepare(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_release);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI CPTS driver");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 69a46b92c7d6..c96eca2b1b46 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -20,11 +20,14 @@
#ifndef _TI_CPTS_H_
#define _TI_CPTS_H_
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clocksource.h>
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/timecounter.h>
@@ -94,9 +97,6 @@ enum {
CPTS_EV_TX, /* Ethernet Transmit Event */
};
-/* This covers any input clock up to about 500 MHz. */
-#define CPTS_OVERFLOW_PERIOD (HZ * 8)
-
#define CPTS_FIFO_DEPTH 16
#define CPTS_MAX_EVENTS 32
@@ -108,10 +108,10 @@ struct cpts_event {
};
struct cpts {
+ struct device *dev;
struct cpsw_cpts __iomem *reg;
int tx_enable;
int rx_enable;
-#ifdef CONFIG_TI_CPTS
struct ptp_clock_info info;
struct ptp_clock *clock;
spinlock_t lock; /* protects time registers */
@@ -124,22 +124,86 @@ struct cpts {
struct list_head events;
struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS];
-#endif
+ unsigned long ov_check_period;
};
-#ifdef CONFIG_TI_CPTS
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+int cpts_register(struct cpts *cpts);
+void cpts_unregister(struct cpts *cpts);
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node);
+void cpts_release(struct cpts *cpts);
+
+static inline void cpts_rx_enable(struct cpts *cpts, int enable)
+{
+ cpts->rx_enable = enable;
+}
+
+static inline bool cpts_is_rx_enabled(struct cpts *cpts)
+{
+ return !!cpts->rx_enable;
+}
+
+static inline void cpts_tx_enable(struct cpts *cpts, int enable)
+{
+ cpts->tx_enable = enable;
+}
+
+static inline bool cpts_is_tx_enabled(struct cpts *cpts)
+{
+ return !!cpts->tx_enable;
+}
+
#else
+struct cpts;
+
static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
}
static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
}
+
+static inline
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ return NULL;
+}
+
+static inline void cpts_release(struct cpts *cpts)
+{
+}
+
+static inline int
+cpts_register(struct cpts *cpts)
+{
+ return 0;
+}
+
+static inline void cpts_unregister(struct cpts *cpts)
+{
+}
+
+static inline void cpts_rx_enable(struct cpts *cpts, int enable)
+{
+}
+
+static inline bool cpts_is_rx_enabled(struct cpts *cpts)
+{
+ return false;
+}
+
+static inline void cpts_tx_enable(struct cpts *cpts, int enable)
+{
+}
+
+static inline bool cpts_is_tx_enabled(struct cpts *cpts)
+{
+ return false;
+}
#endif
-int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
-void cpts_unregister(struct cpts *cpts);
#endif
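
A hedged sketch of how a MAC driver is expected to consume the split create/register API declared above; the platform-driver skeleton, the priv pointer, and its fields are illustrative assumptions, only the cpts_* calls come from this series. probe pairs with remove, ndo_open with ndo_stop.

#include <linux/platform_device.h>
#include "cpts.h"

static int mac_probe(struct platform_device *pdev)      /* hypothetical */
{
        priv->cpts = cpts_create(&pdev->dev, priv->cpts_regs,
                                 pdev->dev.of_node);  /* parse DT, prep clock */
        return PTR_ERR_OR_ZERO(priv->cpts);
}

static int mac_open(struct net_device *ndev)
{
        return cpts_register(priv->cpts);     /* enable clock, register PHC */
}

static int mac_stop(struct net_device *ndev)
{
        cpts_unregister(priv->cpts);          /* unregister PHC, disable clock */
        return 0;
}

static int mac_remove(struct platform_device *pdev)
{
        cpts_release(priv->cpts);             /* unprepare the reference clock */
        return 0;
}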
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 9f10da60e02d..057025722e3d 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -990,7 +990,12 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
}
#define AT86RF2XX_MAX_ED_LEVELS 0xF
-static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+static const s32 at86rf233_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, -7800, -7600,
+ -7400, -7200, -7000, -6800, -6600, -6400,
+};
+
+static const s32 at86rf231_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
-9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
-7100, -6900, -6700, -6500, -6300, -6100,
};
@@ -1343,7 +1348,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
- .rssi_base_val = -91,
+ .rssi_base_val = -94,
.set_channel = at86rf23x_set_channel,
.set_txpower = at86rf23x_set_txpower,
};
@@ -1557,9 +1562,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
- lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
- lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
-
lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
switch (part) {
@@ -1575,6 +1577,8 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf231_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
+ lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf231_ed_levels);
break;
case 7:
chip = "at86rf212";
@@ -1598,6 +1602,8 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf233_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
+ lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf233_ed_levels);
break;
default:
chip = "unknown";
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index ec387efb61d0..0d673f7682ee 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -218,7 +218,7 @@ static int fakelb_probe(struct platform_device *pdev)
goto err_slave;
}
- dev_info(&pdev->dev, "added ieee802154 hardware\n");
+ dev_info(&pdev->dev, "added %i fake ieee802154 hardware devices\n", numlbs);
return 0;
err_slave:
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 05a62d2216c5..031093e1c25f 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -97,7 +97,6 @@ struct ipvl_port {
struct work_struct wq;
struct sk_buff_head backlog;
int count;
- struct rcu_head rcu;
};
static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5430460167b5..ffe8994e64fc 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -128,7 +128,7 @@ static int ipvlan_port_create(struct net_device *dev)
return 0;
err:
- kfree_rcu(port, rcu);
+ kfree(port);
return err;
}
@@ -145,7 +145,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
__skb_queue_purge(&port->backlog);
- kfree_rcu(port, rcu);
+ kfree(port);
}
#define IPVLAN_FEATURES \
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index a198946bc54f..8716b8c07feb 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1723,6 +1723,7 @@ static int irda_usb_probe(struct usb_interface *intf,
/* Don't change this buffer size and allocation without doing
* some heavy and complete testing. Don't ask why :-(
* Jean II */
+ ret = -ENOMEM;
self->speed_buff = kzalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
if (!self->speed_buff)
goto err_out_3;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 96745888a4fc..f293d33fb28f 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -39,6 +39,8 @@
*
********************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -84,13 +86,13 @@ static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
/* Some prototypes */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
- unsigned int dma);
+ unsigned int dma);
static int w83977af_close(struct w83977af_ir *self);
static int w83977af_probe(int iobase, int irq, int dma);
static int w83977af_dma_receive(struct w83977af_ir *self);
static int w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
- struct net_device *dev);
+ struct net_device *dev);
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
@@ -110,7 +112,7 @@ static int __init w83977af_init(void)
{
int i;
- for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
}
@@ -127,7 +129,7 @@ static void __exit w83977af_cleanup(void)
{
int i;
- for (i=0; i < ARRAY_SIZE(dev_self); i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
w83977af_close(dev_self[i]);
}
@@ -155,8 +157,8 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* Lock the port that we need */
if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
- pr_debug("%s(), can't get iobase of 0x%03x\n",
- __func__ , iobase);
+ pr_debug("%s: can't get iobase of 0x%03x\n",
+ __func__, iobase);
return -ENODEV;
}
@@ -168,9 +170,8 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
* Allocate new instance of the driver
*/
dev = alloc_irdadev(sizeof(struct w83977af_ir));
- if (dev == NULL) {
- printk( KERN_ERR "IrDA: Can't allocate memory for "
- "IrDA control block!\n");
+ if (!dev) {
+ pr_err("IrDA: Can't allocate memory for IrDA control block!\n");
err = -ENOMEM;
goto err_out;
}
@@ -178,7 +179,6 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self = netdev_priv(dev);
spin_lock_init(&self->lock);
-
/* Initialize IO */
self->io.fir_base = iobase;
self->io.irq = irq;
@@ -192,8 +192,8 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* The only value we must override is the baud rate */
/* FIXME: The HP HDLS-1100 does not support 1152000! */
- self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
- IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
+ self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
+ IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
/* The HP HDLS-1100 needs 1 ms according to the specs */
self->qos.min_turn_time.bits = qos_mtt_bits;
@@ -207,7 +207,7 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self->rx_buff.head =
dma_zalloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
- if (self->rx_buff.head == NULL) {
+ if (!self->rx_buff.head) {
err = -ENOMEM;
goto err_out1;
}
@@ -215,7 +215,7 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self->tx_buff.head =
dma_zalloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
- if (self->tx_buff.head == NULL) {
+ if (!self->tx_buff.head) {
err = -ENOMEM;
goto err_out2;
}
@@ -230,7 +230,7 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
err = register_netdev(dev);
if (err) {
- net_err_ratelimited("%s(), register_netdevice() failed!\n",
+ net_err_ratelimited("%s:, register_netdevice() failed!\n",
__func__);
goto err_out3;
}
@@ -263,7 +263,7 @@ static int w83977af_close(struct w83977af_ir *self)
{
int iobase;
- iobase = self->io.fir_base;
+ iobase = self->io.fir_base;
#ifdef CONFIG_USE_W977_PNP
/* enter PnP configuration mode */
@@ -281,8 +281,7 @@ static int w83977af_close(struct w83977af_ir *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- pr_debug("%s(), Releasing Region %03x\n",
- __func__ , self->io.fir_base);
+ pr_debug("%s: Releasing Region %03x\n", __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -303,7 +302,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
int version;
int i;
- for (i=0; i < 2; i++) {
+ for (i = 0; i < 2; i++) {
#ifdef CONFIG_USE_W977_PNP
/* Enter PnP configuration mode */
w977_efm_enter(efbase[i]);
@@ -317,7 +316,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
/* Netwinder uses 1 higher than Linux */
- w977_write_reg(0x74, dma+1, efbase[i]);
+ w977_write_reg(0x74, dma + 1, efbase[i]);
#else
w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
@@ -333,23 +332,23 @@ static int w83977af_probe(int iobase, int irq, int dma)
#endif /* CONFIG_USE_W977_PNP */
/* Disable Advanced mode */
switch_bank(iobase, SET2);
- outb(iobase+2, 0x00);
+ outb(iobase + 2, 0x00);
/* Turn on UART (global) interrupts */
switch_bank(iobase, SET0);
- outb(HCR_EN_IRQ, iobase+HCR);
+ outb(HCR_EN_IRQ, iobase + HCR);
/* Switch to advanced mode */
switch_bank(iobase, SET2);
- outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
+ outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);
/* Set default IR-mode */
switch_bank(iobase, SET0);
- outb(HCR_SIR, iobase+HCR);
+ outb(HCR_SIR, iobase + HCR);
/* Read the Advanced IR ID */
switch_bank(iobase, SET3);
- version = inb(iobase+AUID);
+ version = inb(iobase + AUID);
/* Should be 0x1? */
if (0x10 == (version & 0xf0)) {
@@ -357,17 +356,17 @@ static int w83977af_probe(int iobase, int irq, int dma)
/* Set FIFO size to 32 */
switch_bank(iobase, SET2);
- outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
/* Set FIFO threshold to TX17, RX16 */
switch_bank(iobase, SET0);
- outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
- UFR_EN_FIFO,iobase+UFR);
+ outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
+ UFR_EN_FIFO, iobase + UFR);
/* Receiver frame length */
switch_bank(iobase, SET4);
- outb(2048 & 0xff, iobase+6);
- outb((2048 >> 8) & 0x1f, iobase+7);
+ outb(2048 & 0xff, iobase + 6);
+ outb((2048 >> 8) & 0x1f, iobase + 7);
/*
* Init HP HSDL-1100 transceiver.
@@ -382,7 +381,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
* CIRRX pin 40 connected to pin 37
*/
switch_bank(iobase, SET7);
- outb(0x40, iobase+7);
+ outb(0x40, iobase + 7);
net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
version);
@@ -390,7 +389,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
return 0;
} else {
/* Try next extended function register address */
- pr_debug("%s(), Wrong chip version", __func__);
+ pr_debug("%s: Wrong chip version\n", __func__);
}
}
return -1;
@@ -408,66 +407,67 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
self->io.speed = speed;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable interrupts */
switch_bank(iobase, SET0);
- outb(0, iobase+ICR);
+ outb(0, iobase + ICR);
/* Select Set 2 */
switch_bank(iobase, SET2);
- outb(0x00, iobase+ABHL);
+ outb(0x00, iobase + ABHL);
switch (speed) {
- case 9600: outb(0x0c, iobase+ABLL); break;
- case 19200: outb(0x06, iobase+ABLL); break;
- case 38400: outb(0x03, iobase+ABLL); break;
- case 57600: outb(0x02, iobase+ABLL); break;
- case 115200: outb(0x01, iobase+ABLL); break;
+ case 9600: outb(0x0c, iobase + ABLL); break;
+ case 19200: outb(0x06, iobase + ABLL); break;
+ case 38400: outb(0x03, iobase + ABLL); break;
+ case 57600: outb(0x02, iobase + ABLL); break;
+ case 115200: outb(0x01, iobase + ABLL); break;
case 576000:
ir_mode = HCR_MIR_576;
- pr_debug("%s(), handling baud of 576000\n", __func__);
+ pr_debug("%s: handling baud of 576000\n", __func__);
break;
case 1152000:
ir_mode = HCR_MIR_1152;
- pr_debug("%s(), handling baud of 1152000\n", __func__);
+ pr_debug("%s: handling baud of 1152000\n", __func__);
break;
case 4000000:
ir_mode = HCR_FIR;
- pr_debug("%s(), handling baud of 4000000\n", __func__);
+ pr_debug("%s: handling baud of 4000000\n", __func__);
break;
default:
ir_mode = HCR_FIR;
- pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed);
+ pr_debug("%s: unknown baud rate of %d\n", __func__, speed);
break;
}
/* Set speed mode */
switch_bank(iobase, SET0);
- outb(ir_mode, iobase+HCR);
+ outb(ir_mode, iobase + HCR);
/* set FIFO size to 32 */
switch_bank(iobase, SET2);
- outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
/* set FIFO threshold to TX17, RX16 */
switch_bank(iobase, SET0);
- outb(0x00, iobase+UFR); /* Reset */
- outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
- outb(0xa7, iobase+UFR);
+ outb(0x00, iobase + UFR); /* Reset */
+ outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
+ outb(0xa7, iobase + UFR);
netif_wake_queue(self->netdev);
/* Enable some interrupts so we can receive frames */
switch_bank(iobase, SET0);
if (speed > PIO_MAX_SPEED) {
- outb(ICR_EFSFI, iobase+ICR);
+ outb(ICR_EFSFI, iobase + ICR);
w83977af_dma_receive(self);
- } else
- outb(ICR_ERBRI, iobase+ICR);
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
/* Restore SSR */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
/*
@@ -477,7 +477,7 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
*
*/
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
struct w83977af_ir *self;
__s32 speed;
@@ -489,8 +489,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
iobase = self->io.fir_base;
- pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies,
- (int)skb->len);
+ pr_debug("%s: %ld, skb->len=%d\n", __func__, jiffies, (int)skb->len);
/* Lock transmit buffer */
netif_stop_queue(dev);
@@ -503,12 +502,12 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
w83977af_change_speed(self, speed);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
- } else
- self->new_speed = speed;
+ }
+ self->new_speed = speed;
}
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Decide if we should use PIO or DMA transfer */
if (self->io.speed > PIO_MAX_SPEED) {
@@ -517,15 +516,15 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
self->tx_buff.len = skb->len;
mtt = irda_get_mtt(skb);
- pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
- if (mtt > 1000)
- mdelay(mtt/1000);
- else if (mtt)
+ pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
+ if (mtt > 1000)
+ mdelay(mtt / 1000);
+ else if (mtt)
udelay(mtt);
/* Enable DMA interrupt */
switch_bank(iobase, SET0);
- outb(ICR_EDMAI, iobase+ICR);
+ outb(ICR_EDMAI, iobase + ICR);
w83977af_dma_write(self, iobase);
} else {
self->tx_buff.data = self->tx_buff.head;
@@ -534,12 +533,12 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
/* Add interrupt on tx low level (will fire immediately) */
switch_bank(iobase, SET0);
- outb(ICR_ETXTHI, iobase+ICR);
+ outb(ICR_ETXTHI, iobase + ICR);
}
dev_kfree_skb(skb);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return NETDEV_TX_OK;
}
@@ -553,28 +552,29 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
- pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len);
+
+ pr_debug("%s: len=%d\n", __func__, self->tx_buff.len);
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
/* Choose transmit DMA channel */
switch_bank(iobase, SET2);
- outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
+ outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase + ADCR1);
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_MODE_WRITE);
self->io.direction = IO_XMIT;
/* Enable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
+ outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
/*
@@ -589,28 +589,27 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
__u8 set;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- if (!(inb_p(iobase+USR) & USR_TSRE)) {
- pr_debug("%s(), warning, FIFO not empty yet!\n", __func__);
+ if (!(inb_p(iobase + USR) & USR_TSRE)) {
+ pr_debug("%s: warning, FIFO not empty yet!\n", __func__);
fifo_size -= 17;
- pr_debug("%s(), %d bytes left in tx fifo\n",
- __func__ , fifo_size);
+ pr_debug("%s: %d bytes left in tx fifo\n", __func__, fifo_size);
}
/* Fill FIFO with current frame */
while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
- outb(buf[actual++], iobase+TBR);
+ outb(buf[actual++], iobase + TBR);
}
- pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
- __func__ , fifo_size, actual, len);
+ pr_debug("%s: fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
/* Restore bank */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return actual;
}
@@ -627,31 +626,31 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
int iobase;
__u8 set;
- pr_debug("%s(%ld)\n", __func__ , jiffies);
+ pr_debug("%s: %ld\n", __func__, jiffies);
- IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self, return;);
iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
/* Check for underrun! */
- if (inb(iobase+AUDR) & AUDR_UNDR) {
- pr_debug("%s(), Transmit underrun!\n", __func__);
+ if (inb(iobase + AUDR) & AUDR_UNDR) {
+ pr_debug("%s: Transmit underrun!\n", __func__);
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
/* Clear bit, by writing 1 to it */
- outb(AUDR_UNDR, iobase+AUDR);
- } else
+ outb(AUDR_UNDR, iobase + AUDR);
+ } else {
self->netdev->stats.tx_packets++;
-
+ }
if (self->new_speed) {
w83977af_change_speed(self, self->new_speed);
@@ -663,7 +662,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
netif_wake_queue(self->netdev);
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
/*
@@ -681,23 +680,23 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
unsigned long flags;
__u8 hcr;
#endif
- IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self, return -1;);
pr_debug("%s\n", __func__);
- iobase= self->io.fir_base;
+ iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
/* Choose DMA Rx, DMA Fairness, and Advanced mode */
switch_bank(iobase, SET2);
- outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
- iobase+ADCR1);
+ outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
+ iobase + ADCR1);
self->io.direction = IO_RECV;
self->rx_buff.data = self->rx_buff.head;
@@ -720,21 +719,21 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
* be finished transmitting yet
*/
switch_bank(iobase, SET0);
- outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
+ outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
/* Enable DMA */
switch_bank(iobase, SET0);
#ifdef CONFIG_ARCH_NETWINDER
- hcr = inb(iobase+HCR);
- outb(hcr | HCR_EN_DMA, iobase+HCR);
+ hcr = inb(iobase + HCR);
+ outb(hcr | HCR_EN_DMA, iobase + HCR);
enable_dma(self->io.dma);
spin_unlock_irqrestore(&self->lock, flags);
#else
- outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
#endif
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return 0;
}
@@ -761,17 +760,17 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
iobase = self->io.fir_base;
/* Read status FIFO */
switch_bank(iobase, SET5);
- while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
+ while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
st_fifo->entries[st_fifo->tail].status = status;
- st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
- st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
+ st_fifo->entries[st_fifo->tail].len = inb(iobase + RFLFL);
+ st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;
st_fifo->tail++;
st_fifo->len++;
@@ -814,16 +813,15 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
} else {
/* Check if we have transferred all data to memory */
switch_bank(iobase, SET0);
- if (inb(iobase+USR) & USR_RDR) {
+ if (inb(iobase + USR) & USR_RDR)
udelay(80); /* Should be enough!? */
- }
- skb = dev_alloc_skb(len+1);
- if (skb == NULL) {
- printk(KERN_INFO
- "%s(), memory squeeze, dropping frame.\n", __func__);
+ skb = dev_alloc_skb(len + 1);
+ if (!skb) {
+ pr_info("%s: memory squeeze, dropping frame\n",
+ __func__);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return FALSE;
}
@@ -833,12 +831,12 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
/* Copy frame without CRC */
if (self->io.speed < 4000000) {
- skb_put(skb, len-2);
+ skb_put(skb, len - 2);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 2);
} else {
- skb_put(skb, len-4);
+ skb_put(skb, len - 4);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 4);
@@ -855,7 +853,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
}
}
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return TRUE;
}
@@ -871,16 +869,16 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
__u8 byte = 0x00;
int iobase;
- IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self, return;);
iobase = self->io.fir_base;
/* Receive all characters in Rx FIFO */
do {
- byte = inb(iobase+RBR);
+ byte = inb(iobase + RBR);
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
byte);
- } while (inb(iobase+USR) & USR_RDR); /* Data available */
+ } while (inb(iobase + USR) & USR_RDR); /* Data available */
}
/*
@@ -896,7 +894,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
__u8 set;
int iobase;
- pr_debug("%s(), isr=%#x\n", __func__ , isr);
+ pr_debug("%s: isr=%#x\n", __func__, isr);
iobase = self->io.fir_base;
/* Transmit FIFO low on data */
@@ -916,10 +914,10 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
if (self->tx_buff.len > 0) {
new_icr |= ICR_ETXTHI;
} else {
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- outb(AUDR_SFEND, iobase+AUDR);
- outb(set, iobase+SSR);
+ outb(AUDR_SFEND, iobase + AUDR);
+ outb(set, iobase + SSR);
self->netdev->stats.tx_packets++;
@@ -932,7 +930,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
if (isr & ISR_TXEMP_I) {
/* Check if we need to change the speed? */
if (self->new_speed) {
- pr_debug("%s(), Changing speed!\n", __func__);
+ pr_debug("%s: Changing speed!\n", __func__);
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
}
@@ -965,12 +963,11 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
int iobase;
iobase = self->io.fir_base;
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* End of frame detected in FIFO */
- if (isr & (ISR_FEND_I|ISR_FSF_I)) {
+ if (isr & (ISR_FEND_I | ISR_FSF_I)) {
if (w83977af_dma_receive_complete(self)) {
-
/* Wait for next status FIFO interrupt */
new_icr |= ICR_EFSFI;
} else {
@@ -978,11 +975,11 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
/* Set timer value, resolution 1 ms */
switch_bank(iobase, SET4);
- outb(0x01, iobase+TMRL); /* 1 ms */
- outb(0x00, iobase+TMRH);
+ outb(0x01, iobase + TMRL); /* 1 ms */
+ outb(0x00, iobase + TMRH);
/* Start timer */
- outb(IR_MSL_EN_TMR, iobase+IR_MSL);
+ outb(IR_MSL_EN_TMR, iobase + IR_MSL);
new_icr |= ICR_ETMRI;
}
@@ -991,7 +988,7 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
if (isr & ISR_TMR_I) {
/* Disable timer */
switch_bank(iobase, SET4);
- outb(0, iobase+IR_MSL);
+ outb(0, iobase + IR_MSL);
/* Clear timer event */
/* switch_bank(iobase, SET0); */
@@ -1026,7 +1023,7 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
}
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return new_icr;
}
@@ -1049,24 +1046,24 @@ static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
iobase = self->io.fir_base;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- icr = inb(iobase+ICR);
- isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
+ icr = inb(iobase + ICR);
+ isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */
- outb(0, iobase+ICR); /* Disable interrupts */
+ outb(0, iobase + ICR); /* Disable interrupts */
if (isr) {
/* Dispatch interrupt handler for the current speed */
- if (self->io.speed > PIO_MAX_SPEED )
+ if (self->io.speed > PIO_MAX_SPEED)
icr = w83977af_fir_interrupt(self, isr);
else
icr = w83977af_sir_interrupt(self, isr);
}
- outb(icr, iobase+ICR); /* Restore (new) interrupts */
- outb(set, iobase+SSR); /* Restore bank register */
+ outb(icr, iobase + ICR); /* Restore (new) interrupts */
+ outb(set, iobase + SSR); /* Restore bank register */
return IRQ_RETVAL(isr);
}
@@ -1082,21 +1079,22 @@ static int w83977af_is_receiving(struct w83977af_ir *self)
int iobase;
__u8 set;
- IRDA_ASSERT(self != NULL, return FALSE;);
+ IRDA_ASSERT(self, return FALSE;);
if (self->io.speed > 115200) {
iobase = self->io.fir_base;
/* Check if rx FIFO is not empty */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET2);
- if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
+ if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
/* We are receiving something */
status = TRUE;
}
- outb(set, iobase+SSR);
- } else
+ outb(set, iobase + SSR);
+ } else {
status = (self->rx_buff.state != OUTSIDE_FRAME);
+ }
return status;
}
@@ -1114,16 +1112,15 @@ static int w83977af_net_open(struct net_device *dev)
char hwname[32];
__u8 set;
-
- IRDA_ASSERT(dev != NULL, return -1;);
+ IRDA_ASSERT(dev, return -1;);
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return 0;);
+ IRDA_ASSERT(self, return 0;);
iobase = self->io.fir_base;
if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
- (void *) dev)) {
+ (void *)dev)) {
return -EAGAIN;
}
/*
@@ -1136,18 +1133,19 @@ static int w83977af_net_open(struct net_device *dev)
}
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Enable some interrupts so we can receive frames again */
switch_bank(iobase, SET0);
if (self->io.speed > 115200) {
- outb(ICR_EFSFI, iobase+ICR);
+ outb(ICR_EFSFI, iobase + ICR);
w83977af_dma_receive(self);
- } else
- outb(ICR_ERBRI, iobase+ICR);
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
/* Restore bank register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
/* Ready to play! */
netif_start_queue(dev);
@@ -1176,11 +1174,11 @@ static int w83977af_net_close(struct net_device *dev)
int iobase;
__u8 set;
- IRDA_ASSERT(dev != NULL, return -1;);
+ IRDA_ASSERT(dev, return -1;);
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return 0;);
+ IRDA_ASSERT(self, return 0;);
iobase = self->io.fir_base;
@@ -1195,17 +1193,17 @@ static int w83977af_net_close(struct net_device *dev)
disable_dma(self->io.dma);
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable interrupts */
switch_bank(iobase, SET0);
- outb(0, iobase+ICR);
+ outb(0, iobase + ICR);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
/* Restore bank register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return 0;
}
@@ -1218,18 +1216,18 @@ static int w83977af_net_close(struct net_device *dev)
*/
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
struct w83977af_ir *self;
unsigned long flags;
int ret = 0;
- IRDA_ASSERT(dev != NULL, return -1;);
+ IRDA_ASSERT(dev, return -1;);
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self, return -1;);
- pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+ pr_debug("%s: %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
spin_lock_irqsave(&self->lock, flags);
@@ -1263,7 +1261,6 @@ MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");
-
module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3c0a1714977b..20b3fdf282c5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -43,7 +43,6 @@ struct macvlan_port {
struct net_device *dev;
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
- struct rcu_head rcu;
struct sk_buff_head bc_queue;
struct work_struct bc_work;
bool passthru;
@@ -1151,7 +1150,7 @@ static void macvlan_port_destroy(struct net_device *dev)
cancel_work_sync(&port->bc_work);
__skb_queue_purge(&port->bc_queue);
- kfree_rcu(port, rcu);
+ kfree(port);
}
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 7b7c70e2341e..2de7faee9b19 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -27,24 +27,6 @@ static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int nlmon_is_valid_mtu(int new_mtu)
-{
- /* Note that in netlink we do not really have an upper limit. On
- * default, we use NLMSG_GOODSIZE. Here at least we should make
- * sure that it's at least the header size.
- */
- return new_mtu >= (int) sizeof(struct nlmsghdr);
-}
-
-static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (!nlmon_is_valid_mtu(new_mtu))
- return -EINVAL;
-
- dev->mtu = new_mtu;
- return 0;
-}
-
static int nlmon_dev_init(struct net_device *dev)
{
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
@@ -124,7 +106,6 @@ static const struct net_device_ops nlmon_ops = {
.ndo_stop = nlmon_close,
.ndo_start_xmit = nlmon_xmit,
.ndo_get_stats64 = nlmon_get_stats64,
- .ndo_change_mtu = nlmon_change_mtu,
};
static void nlmon_setup(struct net_device *dev)
@@ -145,6 +126,7 @@ static void nlmon_setup(struct net_device *dev)
* expected in most cases.
*/
dev->mtu = NLMSG_GOODSIZE;
+ dev->min_mtu = sizeof(struct nlmsghdr);
}
static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
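
Deleting nlmon_change_mtu() works because the lower bound moves into dev->min_mtu, which the net core now validates centrally. A hedged paraphrase of that core-side range check (an assumption about dev_set_mtu() in this series, not code from this hunk):

#include <linux/netdevice.h>

static int validate_mtu_range(const struct net_device *dev, int new_mtu)
{
        if (new_mtu < (int)dev->min_mtu)
                return -EINVAL;
        if (dev->max_mtu && new_mtu > (int)dev->max_mtu)
                return -EINVAL;
        return 0;
}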
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f06279..320d0dc33b3d 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -88,7 +88,9 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.phy_id = _id, \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES, \
+ .features = (PHY_BASIC_FEATURES | \
+ SUPPORTED_Pause | \
+ SUPPORTED_Asym_Pause), \
.flags = PHY_HAS_INTERRUPT, \
\
.soft_reset = genphy_soft_reset, \
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a3ac8636f3ba..a569e61bc1d9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1286,7 +1286,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
+#ifndef CONFIG_4KSTACKS
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+#else
netif_rx_ni(skb);
+#endif
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 0c459e92f1b3..019f758953fc 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3494,6 +3494,7 @@ static int lan78xx_probe(struct usb_interface *intf,
if (buf) {
dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_intr) {
+ ret = -ENOMEM;
kfree(buf);
goto out3;
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a21d93a54cef..b425fa1013af 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -969,12 +969,17 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
int ret;
- struct sockaddr *addr = p;
+ struct sockaddr *addr;
struct scatterlist sg;
- ret = eth_prepare_mac_addr_change(dev, p);
+ addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ memcpy(addr, p, sizeof(*addr));
+
+ ret = eth_prepare_mac_addr_change(dev, addr);
if (ret)
- return ret;
+ goto out;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
sg_init_one(&sg, addr->sa_data, dev->addr_len);
@@ -982,7 +987,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
dev_warn(&vdev->dev,
"Failed to set mac address by vq command.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
@@ -996,8 +1002,11 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
}
eth_commit_mac_addr_change(dev, p);
+ ret = 0;
- return 0;
+out:
+ kfree(addr);
+ return ret;
}
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
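
The kmalloc'ed copy above looks redundant until you notice that addr previously aliased the caller's buffer, which may sit on the stack; with vmalloc'ed stacks (CONFIG_VMAP_STACK) a stack address cannot safely back a scatterlist. A minimal sketch of the resulting allocate-copy-submit pattern; submit_copy and its commentary are hypothetical, not part of this patch:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int submit_copy(const void *data, size_t len)
{
        struct scatterlist sg;
        void *buf = kmemdup(data, len, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        sg_init_one(&sg, buf, len);     /* safe: buf is linear-mapped */
        /* ... hand &sg to the control virtqueue here ... */
        kfree(buf);
        return 0;
}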
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 61580b19f9f6..22f884c97387 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -124,6 +124,8 @@ struct drbg_state {
struct skcipher_request *ctr_req; /* CTR mode request handle */
__u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */
__u8 *ctr_null_value; /* CTR mode aligned zero buf */
+ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */
+ __u8 *outscratchpad; /* CTR mode aligned outbuf */
struct completion ctr_completion; /* CTR mode async handler */
int ctr_async_err; /* CTR mode async error */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 69d0a7f12a3b..8796ff03f472 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+void bpf_prog_calc_digest(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 97338134398f..f078d2b1cff6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
+#include <linux/cryptohash.h>
#include <net/sch_generic.h>
@@ -56,6 +57,9 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
+/* Maximum BPF program size in bytes. */
+#define MAX_BPF_SIZE (BPF_MAXINSNS * sizeof(struct bpf_insn))
+
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -404,8 +408,9 @@ struct bpf_prog {
cb_access:1, /* Is control block accessed? */
dst_needed:1; /* Do we need dst entry? */
kmemcheck_bitfield_end(meta);
- u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
+ u32 len; /* Number of filter blocks */
+ u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const void *ctx,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 734bab4c3bef..fc5848dad7a4 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -186,7 +186,6 @@ struct tcp_sock {
u32 tsoffset; /* timestamp offset */
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- unsigned long tsq_flags;
/* Data for direct copy to user */
struct {
@@ -364,7 +363,7 @@ struct tcp_sock {
u32 *saved_syn;
};
-enum tsq_flags {
+enum tsq_enum {
TSQ_THROTTLED,
TSQ_QUEUED,
TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */
@@ -375,6 +374,15 @@ enum tsq_flags {
*/
};
+enum tsq_flags {
+ TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
+ TSQF_QUEUED = (1UL << TSQ_QUEUED),
+ TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
+ TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
+ TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
+ TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
+};
+
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
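
The new tsq_flags enum mirrors tsq_enum as masks so that sk_tsq_flags (see the sock.h hunk later in this series) can be tested in bulk. A sketch of the intended pairing; tsq_example() is a hypothetical consumer, not code from this hunk:

#include <net/sock.h>
#include <linux/tcp.h>

static void tsq_example(struct sock *sk)
{
        /* producers mark a condition with the bit number ... */
        set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags);

        /* ... readers test a whole group of conditions in one load */
        if (sk->sk_tsq_flags & (TCPF_WRITE_TIMER_DEFERRED |
                                TCPF_DELACK_TIMER_DEFERRED |
                                TCPF_MTU_REDUCED_DEFERRED))
                ;       /* run the deferred handlers here */
}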
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9dddf77a69cc..1d716449209e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -36,7 +36,7 @@ struct tc_action {
struct tcf_t tcfa_tm;
struct gnet_stats_basic_packed tcfa_bstats;
struct gnet_stats_queue tcfa_qstats;
- struct gnet_stats_rate_est64 tcfa_rate_est;
+ struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
struct rcu_head tcfa_rcu;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 0a1e21d7bce1..01487192f628 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -197,7 +197,7 @@ typedef struct {
#define BDADDR_LE_PUBLIC 0x01
#define BDADDR_LE_RANDOM 0x02
-static inline bool bdaddr_type_is_valid(__u8 type)
+static inline bool bdaddr_type_is_valid(u8 type)
{
switch (type) {
case BDADDR_BREDR:
@@ -209,7 +209,7 @@ static inline bool bdaddr_type_is_valid(__u8 type)
return false;
}
-static inline bool bdaddr_type_is_le(__u8 type)
+static inline bool bdaddr_type_is_le(u8 type)
{
switch (type) {
case BDADDR_LE_PUBLIC:
@@ -279,15 +279,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
struct l2cap_ctrl {
- __u8 sframe:1,
+ u8 sframe:1,
poll:1,
final:1,
fcs:1,
sar:2,
super:2;
- __u16 reqseq;
- __u16 txseq;
- __u8 retries;
+
+ u16 reqseq;
+ u16 txseq;
+ u8 retries;
__le16 psm;
bdaddr_t bdaddr;
struct l2cap_chan *chan;
@@ -303,7 +304,7 @@ typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
#define HCI_REQ_SKB BIT(1)
struct hci_ctrl {
- __u16 opcode;
+ u16 opcode;
u8 req_flags;
u8 req_event;
union {
@@ -313,10 +314,10 @@ struct hci_ctrl {
};
struct bt_skb_cb {
- __u8 pkt_type;
- __u8 force_active;
- __u16 expect;
- __u8 incoming:1;
+ u8 pkt_type;
+ u8 force_active;
+ u16 expect;
+ u8 incoming:1;
union {
struct l2cap_ctrl l2cap;
struct hci_ctrl hci;
@@ -366,7 +367,7 @@ out:
return NULL;
}
-int bt_to_errno(__u16 code);
+int bt_to_errno(u16 code);
void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 231e121cc7d9..8b7aa370e7a4 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -11,6 +11,8 @@ struct gnet_stats_basic_cpu {
struct u64_stats_sync syncp;
};
+struct net_rate_estimator;
+
struct gnet_dump {
spinlock_t * lock;
struct sk_buff * skb;
@@ -42,8 +44,7 @@ void __gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
- const struct gnet_stats_basic_packed *b,
- struct gnet_stats_rate_est64 *r);
+ struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
@@ -53,16 +54,16 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
seqcount_t *running, struct nlattr *opt);
-void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est);
+void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **ptr,
spinlock_t *stats_lock,
seqcount_t *running, struct nlattr *opt);
-bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est64 *rate_est);
+bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
+bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
+ struct gnet_stats_rate_est64 *sample);
#endif
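
Callers no longer dereference a struct embedded in their object; they sample the RCU-managed estimator through gen_estimator_read(). A hedged usage sketch, assuming a qdisc q converted elsewhere in this series:

#include <net/gen_stats.h>
#include <net/sch_generic.h>

static void dump_rate(struct Qdisc *q)
{
        struct gnet_stats_rate_est64 sample;

        if (gen_estimator_read(&q->rate_est, &sample))
                pr_info("rate: %llu bytes/s, %llu pkts/s\n",
                        (unsigned long long)sample.bps,
                        (unsigned long long)sample.pps);
        else
                pr_info("no rate estimator active\n");
}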
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 79f45e19f31e..130e58361f99 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -1,19 +1,23 @@
#ifndef _XT_RATEEST_H
#define _XT_RATEEST_H
+#include <net/gen_stats.h>
+
struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
struct gnet_stats_basic_packed bstats;
spinlock_t lock;
- /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
- struct gnet_stats_rate_est64 rstats;
+
/* following fields not accessed in hot path */
+ unsigned int refcnt;
struct hlist_node list;
char name[IFNAMSIZ];
- unsigned int refcnt;
struct gnet_estimator params;
struct rcu_head rcu;
+
+ /* keep this field far away to speedup xt_rateest_mt() */
+ struct net_rate_estimator __rcu *rate_est;
};
struct xt_rateest *xt_rateest_lookup(const char *name);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e6aa0a249672..498f81b229a4 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -76,7 +76,7 @@ struct Qdisc {
struct netdev_queue *dev_queue;
- struct gnet_stats_rate_est64 rate_est;
+ struct net_rate_estimator __rcu *rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
diff --git a/include/net/sock.h b/include/net/sock.h
index 69afda6bea15..1749e38d0301 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -343,6 +343,9 @@ struct sock {
#define sk_rxhash __sk_common.skc_rxhash
socket_lock_t sk_lock;
+ atomic_t sk_drops;
+ int sk_rcvlowat;
+ struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -359,14 +362,13 @@ struct sock {
struct sk_buff *tail;
} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
- int sk_forward_alloc;
- __u32 sk_txhash;
+ int sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int sk_napi_id;
unsigned int sk_ll_usec;
+ /* ===== mostly read cache line ===== */
+ unsigned int sk_napi_id;
#endif
- atomic_t sk_drops;
int sk_rcvbuf;
struct sk_filter __rcu *sk_filter;
@@ -379,11 +381,30 @@ struct sock {
#endif
struct dst_entry *sk_rx_dst;
struct dst_entry __rcu *sk_dst_cache;
- /* Note: 32bit hole on 64bit arches */
- atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_sndbuf;
+
+ /* ===== cache line for TX ===== */
+ int sk_wmem_queued;
+ atomic_t sk_wmem_alloc;
+ unsigned long sk_tsq_flags;
+ struct sk_buff *sk_send_head;
struct sk_buff_head sk_write_queue;
+ __s32 sk_peek_off;
+ int sk_write_pending;
+ long sk_sndtimeo;
+ struct timer_list sk_timer;
+ __u32 sk_priority;
+ __u32 sk_mark;
+ u32 sk_pacing_rate; /* bytes per second */
+ u32 sk_max_pacing_rate;
+ struct page_frag sk_frag;
+ netdev_features_t sk_route_caps;
+ netdev_features_t sk_route_nocaps;
+ int sk_gso_type;
+ unsigned int sk_gso_max_size;
+ gfp_t sk_allocation;
+ __u32 sk_txhash;
/*
* Because of non atomicity rules, all
@@ -414,42 +435,24 @@ struct sock {
#define SK_PROTOCOL_MAX U8_MAX
kmemcheck_bitfield_end(flags);
- int sk_wmem_queued;
- gfp_t sk_allocation;
- u32 sk_pacing_rate; /* bytes per second */
- u32 sk_max_pacing_rate;
- netdev_features_t sk_route_caps;
- netdev_features_t sk_route_nocaps;
- int sk_gso_type;
- unsigned int sk_gso_max_size;
u16 sk_gso_max_segs;
- int sk_rcvlowat;
unsigned long sk_lingertime;
- struct sk_buff_head sk_error_queue;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
- __u32 sk_priority;
- __u32 sk_mark;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
- long sk_sndtimeo;
- struct timer_list sk_timer;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
- struct page_frag sk_frag;
- struct sk_buff *sk_send_head;
- __s32 sk_peek_off;
- int sk_write_pending;
#ifdef CONFIG_SECURITY
void *sk_security;
#endif
@@ -910,7 +913,17 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
- sock_rps_record_flow_hash(sk->sk_rxhash);
+ /* Reading sk->sk_rxhash might incur an expensive cache line miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing:
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED)
+ sock_rps_record_flow_hash(sk->sk_rxhash);
#endif
}
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index e601c8c3bdc7..1158a043342a 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -31,7 +31,7 @@
#include <linux/hdlc/ioctl.h>
/* For glibc compatibility. An empty enum does not compile. */
-#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 || \
__UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
/**
* enum net_device_flags - &struct net_device flags
@@ -99,7 +99,7 @@ enum net_device_flags {
IFF_ECHO = 1<<18, /* volatile */
#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
};
-#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 || __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
/* for compatibility with glibc net/if.h */
#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index cd26d7a0fd07..03f194aeadc5 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -5,6 +5,7 @@ header-y += nf_conntrack_ftp.h
header-y += nf_conntrack_sctp.h
header-y += nf_conntrack_tcp.h
header-y += nf_conntrack_tuple_common.h
+header-y += nf_log.h
header-y += nf_tables.h
header-y += nf_tables_compat.h
header-y += nf_nat.h
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 86786d45ee66..1adc0b654996 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -397,6 +397,7 @@ enum {
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
+ TCA_BPF_DIGEST,
__TCA_BPF_MAX,
};
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 9611c7b6c18f..e3db7403296f 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -12,3 +12,4 @@ header-y += tc_bpf.h
header-y += tc_connmark.h
header-y += tc_ife.h
header-y += tc_tunnel_key.h
+header-y += tc_skbmod.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 063d9d465119..a6b88a6f7f71 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -27,6 +27,7 @@ enum {
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
+ TCA_ACT_BPF_DIGEST,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 82a04143368e..bdcc9f4ba767 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,6 +136,71 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
+#define SHA_BPF_RAW_SIZE \
+ round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
+
+/* Called under verifier mutex. */
+void bpf_prog_calc_digest(struct bpf_prog *fp)
+{
+ const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
+ static u32 ws[SHA_WORKSPACE_WORDS];
+ static u8 raw[SHA_BPF_RAW_SIZE];
+ struct bpf_insn *dst = (void *)raw;
+ u32 i, bsize, psize, blocks;
+ bool was_ld_map;
+ u8 *todo = raw;
+ __be32 *result;
+ __be64 *bits;
+
+ sha_init(fp->digest);
+ memset(ws, 0, sizeof(ws));
+
+ /* We need to take out the map fd for the digest calculation
+ * since map fds are unstable from the user space side.
+ */
+ for (i = 0, was_ld_map = false; i < fp->len; i++) {
+ dst[i] = fp->insnsi[i];
+ if (!was_ld_map &&
+ dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
+ was_ld_map = true;
+ dst[i].imm = 0;
+ } else if (was_ld_map &&
+ dst[i].code == 0 &&
+ dst[i].dst_reg == 0 &&
+ dst[i].src_reg == 0 &&
+ dst[i].off == 0) {
+ was_ld_map = false;
+ dst[i].imm = 0;
+ } else {
+ was_ld_map = false;
+ }
+ }
+
+ psize = fp->len * sizeof(struct bpf_insn);
+ memset(&raw[psize], 0, sizeof(raw) - psize);
+ raw[psize++] = 0x80;
+
+ bsize = round_up(psize, SHA_MESSAGE_BYTES);
+ blocks = bsize / SHA_MESSAGE_BYTES;
+ if (bsize - psize >= sizeof(__be64)) {
+ bits = (__be64 *)(todo + bsize - sizeof(__be64));
+ } else {
+ bits = (__be64 *)(todo + bsize + bits_offset);
+ blocks++;
+ }
+ *bits = cpu_to_be64((psize - 1) << 3);
+
+ while (blocks--) {
+ sha_transform(fp->digest, todo, ws);
+ todo += SHA_MESSAGE_BYTES;
+ }
+
+ result = (__force __be32 *)fp->digest;
+ for (i = 0; i < SHA_DIGEST_WORDS; i++)
+ result[i] = cpu_to_be32(fp->digest[i]);
+}
+
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_JMP &&
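The padding arithmetic in bpf_prog_calc_digest() follows SHA-1's message layout: a mandatory 0x80 terminator byte, zero fill, and a big-endian 64-bit bit count that must fit inside the final 64-byte block, else a whole extra block is consumed. A minimal userspace sketch of just that arithmetic (the program size is an assumed example value):

#include <stdint.h>
#include <stdio.h>

#define SHA_MESSAGE_BYTES 64	/* SHA-1 block size, as in the kernel */
#define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint32_t psize = 3 * 8;	/* assumed: 3 insns of 8 bytes each */
	uint32_t bsize, blocks;

	psize++;		/* account for the 0x80 terminator byte */
	bsize = ROUND_UP(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	/* the 64-bit bit count needs 8 bytes at the end of the last block */
	if (bsize - psize < sizeof(uint64_t))
		blocks++;	/* no room left: pad out one more block */

	printf("%u payload bytes -> %u SHA-1 block(s)\n", psize - 1, blocks);
	return 0;
}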
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 85af86c496cd..88f609f1c0c3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -662,8 +662,30 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
+{
+ const struct bpf_prog *prog = filp->private_data;
+ char prog_digest[sizeof(prog->digest) * 2 + 1] = { };
+
+ bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
+ seq_printf(m,
+ "prog_type:\t%u\n"
+ "prog_jited:\t%u\n"
+ "prog_digest:\t%s\n"
+ "memlock:\t%llu\n",
+ prog->type,
+ prog->jited,
+ prog_digest,
+ prog->pages * 1ULL << PAGE_SHIFT);
+}
+#endif
+
static const struct file_operations bpf_prog_fops = {
- .release = bpf_prog_release,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = bpf_prog_show_fdinfo,
+#endif
+ .release = bpf_prog_release,
};
int bpf_prog_new_fd(struct bpf_prog *prog)
@@ -764,8 +786,8 @@ static int bpf_prog_load(union bpf_attr *attr)
/* eBPF programs must be GPL compatible to use GPL-ed functions */
is_gpl = license_is_gpl_compatible(license);
- if (attr->insn_cnt >= BPF_MAXINSNS)
- return -EINVAL;
+ if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
+ return -E2BIG;
if (type == BPF_PROG_TYPE_KPROBE &&
attr->kern_version != LINUX_VERSION_CODE)
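The prog_digest value shown in fdinfo above is the 20-byte digest hex-encoded by the kernel's bin2hex(). A userspace sketch of an equivalent encoder (the digest bytes below are placeholders):

#include <stddef.h>
#include <stdio.h>

/* Write two lowercase hex characters per input byte, NUL-terminated. */
static void bin2hex_sketch(char *dst, const unsigned char *src, size_t n)
{
	static const char hex[] = "0123456789abcdef";
	size_t i;

	for (i = 0; i < n; i++) {
		dst[2 * i]     = hex[src[i] >> 4];
		dst[2 * i + 1] = hex[src[i] & 0x0f];
	}
	dst[2 * n] = '\0';
}

int main(void)
{
	unsigned char digest[20] = { 0xde, 0xad, 0xbe, 0xef };	/* placeholder */
	char out[sizeof(digest) * 2 + 1];

	bin2hex_sketch(out, digest, sizeof(digest));
	printf("prog_digest:\t%s\n", out);
	return 0;
}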
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0e742210750e..da9fb2a9b7eb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1481,14 +1481,19 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
- /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
- * Don't care about overflow or negative values, just add them
+ /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'
+ * insn. Don't care about overflow or negative values, just add them
*/
if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
dst_reg->imm += insn->imm;
else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM)
dst_reg->imm += src_reg->imm;
+ else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K)
+ dst_reg->imm |= insn->imm;
+ else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM)
+ dst_reg->imm |= src_reg->imm;
else
mark_reg_unknown_value(regs, insn->dst_reg);
return 0;
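The new BPF_OR arms mirror the existing BPF_ADD handling: if both operands are known constants the destination stays a constant with the folded value, otherwise it degrades to an unknown value. A simplified sketch of that rule (the types and names here are illustrative, not the verifier's):

#include <stdio.h>

enum reg_type { CONST_IMM, UNKNOWN_VALUE };

struct reg_state {
	enum reg_type type;
	long long imm;
};

/* Fold 'dst |= src' when both values are known; otherwise give up. */
static void fold_or(struct reg_state *dst, const struct reg_state *src)
{
	if (dst->type == CONST_IMM && src->type == CONST_IMM)
		dst->imm |= src->imm;		/* still a known constant */
	else
		dst->type = UNKNOWN_VALUE;	/* precision is lost */
}

int main(void)
{
	struct reg_state dst = { CONST_IMM, 34 }, src = { CONST_IMM, 13 };

	fold_or(&dst, &src);
	printf("type=%d imm=%lld\n", dst.type, dst.imm);	/* 34|13 = 47 */
	return 0;
}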
@@ -3128,9 +3133,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
struct bpf_verifier_env *env;
int ret = -EINVAL;
- if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
- return -E2BIG;
-
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
@@ -3171,6 +3173,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
log_level = 0;
}
+ bpf_prog_calc_digest(env->prog);
+
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7e4fad75acaa..150242ccfcd2 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -89,6 +89,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
*mult = tmp;
*shift = sft;
}
+EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
/*[Clocksource internal variables]---------
* curr_clocksource:
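clocks_calc_mult_shift() exists so callers can convert between clock domains with one multiply and one shift: to_units = (cycles * mult) >> shift. A userspace sketch with hand-picked values for a 125 MHz counter converted to nanoseconds (the kernel helper derives mult/shift for you; these constants are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 125 MHz tick is exactly 8 ns, so mult = 8 << shift is exact. */
	uint32_t shift = 24;
	uint32_t mult = 8u << shift;
	uint64_t cycles = 1250;		/* 10 us worth of ticks */
	uint64_t ns = (cycles * (uint64_t)mult) >> shift;

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles, (unsigned long long)ns);
	return 0;
}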
diff --git a/mm/shmem.c b/mm/shmem.c
index 166ebf5d2bce..9d32e1cb9f38 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1848,6 +1848,18 @@ unlock:
return error;
}
+/*
+ * This is like autoremove_wake_function, but it removes the wait queue
+ * entry unconditionally - even if something else had already woken the
+ * target.
+ */
+static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ int ret = default_wake_function(wait, mode, sync, key);
+ list_del_init(&wait->task_list);
+ return ret;
+}
+
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = file_inode(vma->vm_file);
@@ -1883,7 +1895,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
vmf->pgoff >= shmem_falloc->start &&
vmf->pgoff < shmem_falloc->next) {
wait_queue_head_t *shmem_falloc_waitq;
- DEFINE_WAIT(shmem_fault_wait);
+ DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
ret = VM_FAULT_NOPAGE;
if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
@@ -2665,6 +2677,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
spin_lock(&inode->i_lock);
inode->i_private = NULL;
wake_up_all(&shmem_falloc_waitq);
+ WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list));
spin_unlock(&inode->i_lock);
error = 0;
goto out;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 76fda2268148..d75cdf360730 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2354,6 +2354,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
}
}
+ cond_resched();
+
if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
continue;
diff --git a/mm/workingset.c b/mm/workingset.c
index 617475f529f4..fb1f9183d89a 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -348,7 +348,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
local_irq_enable();
- if (memcg_kmem_enabled()) {
+ if (sc->memcg) {
pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
LRU_ALL_FILE);
} else {
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 447f9490b692..30ecbfb40adf 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -3287,7 +3287,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
&tvlv_tt_data,
&tt_change,
&tt_len);
- if (!tt_len)
+ if (!tt_len || !tvlv_len)
goto unlock;
/* Copy the last orig_node's OGM buffer */
@@ -3305,7 +3305,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
&tvlv_tt_data,
&tt_change,
&tt_len);
- if (!tt_len)
+ if (!tt_len || !tvlv_len)
goto out;
/* fill the rest of the tvlv with the real TT entries */
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c9d2e0abfb89..a18148213b08 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -938,6 +938,7 @@ int br_sysfs_addbr(struct net_device *dev)
if (!br->ifobj) {
pr_info("%s: can't add kobject (directory) %s/%s\n",
__func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR);
+ err = -ENOMEM;
goto out3;
}
return 0;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index aa209b1066c9..92cbbd2afddb 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -1107,10 +1107,7 @@ static struct net_proto_family caif_family_ops = {
static int __init caif_sktinit_module(void)
{
- int err = sock_register(&caif_family_ops);
- if (!err)
- return err;
- return 0;
+ return sock_register(&caif_family_ops);
}
static void __exit caif_sktexit_module(void)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 49816af8586b..9482037a5c8c 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -214,6 +214,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
if (error)
goto no_packet;
+ *peeked = 0;
do {
/* Again only user level code calls this function, so nothing
* interrupt level will suddenly eat the receive_queue.
@@ -227,22 +228,22 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
spin_lock_irqsave(&queue->lock, cpu_flags);
skb_queue_walk(queue, skb) {
*last = skb;
- *peeked = skb->peeked;
if (flags & MSG_PEEK) {
if (_off >= skb->len && (skb->len || _off ||
skb->peeked)) {
_off -= skb->len;
continue;
}
-
- skb = skb_set_peeked(skb);
- error = PTR_ERR(skb);
- if (IS_ERR(skb)) {
- spin_unlock_irqrestore(&queue->lock,
- cpu_flags);
- goto no_packet;
+ if (!skb->len) {
+ skb = skb_set_peeked(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ spin_unlock_irqrestore(&queue->lock,
+ cpu_flags);
+ goto no_packet;
+ }
}
-
+ *peeked = 1;
atomic_inc(&skb->users);
} else {
__skb_unlink(skb, queue);
diff --git a/net/core/filter.c b/net/core/filter.c
index 56b43587d200..b751202e12f8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2748,7 +2748,7 @@ lwt_xmit_func_proto(enum bpf_func_id func_id)
}
}
-static bool __is_valid_access(int off, int size, enum bpf_access_type type)
+static bool __is_valid_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct __sk_buff))
return false;
@@ -2782,7 +2782,7 @@ static bool sk_filter_is_valid_access(int off, int size,
}
}
- return __is_valid_access(off, size, type);
+ return __is_valid_access(off, size);
}
static bool lwt_is_valid_access(int off, int size,
@@ -2815,7 +2815,7 @@ static bool lwt_is_valid_access(int off, int size,
break;
}
- return __is_valid_access(off, size, type);
+ return __is_valid_access(off, size);
}
static bool sock_filter_is_valid_access(int off, int size,
@@ -2833,11 +2833,9 @@ static bool sock_filter_is_valid_access(int off, int size,
if (off < 0 || off + size > sizeof(struct bpf_sock))
return false;
-
/* The verifier guarantees that size > 0. */
if (off % size != 0)
return false;
-
if (size != sizeof(__u32))
return false;
@@ -2910,11 +2908,10 @@ static bool tc_cls_act_is_valid_access(int off, int size,
break;
}
- return __is_valid_access(off, size, type);
+ return __is_valid_access(off, size);
}
-static bool __is_valid_xdp_access(int off, int size,
- enum bpf_access_type type)
+static bool __is_valid_xdp_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct xdp_md))
return false;
@@ -2942,7 +2939,7 @@ static bool xdp_is_valid_access(int off, int size,
break;
}
- return __is_valid_xdp_access(off, size, type);
+ return __is_valid_xdp_access(off, size);
}
void bpf_warn_invalid_xdp_action(u32 act)
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 0993844faeea..101b5d0e2142 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -7,6 +7,7 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ * Eric Dumazet <edumazet@google.com>
*
* Changes:
 * Jamal Hadi Salim - moved it to net/core and reshuffled
@@ -30,171 +31,79 @@
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
-#include <linux/rbtree.h>
#include <linux/slab.h>
+#include <linux/seqlock.h>
#include <net/sock.h>
#include <net/gen_stats.h>
-/*
- This code is NOT intended to be used for statistics collection,
- its purpose is to provide a base for statistical multiplexing
- for controlled load service.
- If you need only statistics, run a user level daemon which
- periodically reads byte counters.
-
- Unfortunately, rate estimation is not a very easy task.
- F.e. I did not find a simple way to estimate the current peak rate
- and even failed to formulate the problem 8)8)
-
- So I preferred not to built an estimator into the scheduler,
- but run this task separately.
- Ideally, it should be kernel thread(s), but for now it runs
- from timers, which puts apparent top bounds on the number of rated
- flows, has minimal overhead on small, but is enough
- to handle controlled load service, sets of aggregates.
-
- We measure rate over A=(1<<interval) seconds and evaluate EWMA:
-
- avrate = avrate*(1-W) + rate*W
-
- where W is chosen as negative power of 2: W = 2^(-ewma_log)
-
- The resulting time constant is:
-
- T = A/(-ln(1-W))
-
-
- NOTES.
-
- * avbps and avpps are scaled by 2^5.
- * both values are reported as 32 bit unsigned values. bps can
- overflow for fast links : max speed being 34360Mbit/sec
- * Minimal interval is HZ/4=250msec (it is the greatest common divisor
- for HZ=100 and HZ=1024 8)), maximal interval
- is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
- are too expensive, longer ones can be implemented
- at user level painlessly.
+/* This code is NOT intended to be used for statistics collection,
+ * its purpose is to provide a base for statistical multiplexing
+ * for controlled load service.
+ * If you need only statistics, run a user level daemon which
+ * periodically reads byte counters.
*/
-#define EST_MAX_INTERVAL 5
-
-struct gen_estimator {
- struct list_head list;
+struct net_rate_estimator {
struct gnet_stats_basic_packed *bstats;
- struct gnet_stats_rate_est64 *rate_est;
spinlock_t *stats_lock;
seqcount_t *running;
- int ewma_log;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ u8 ewma_log;
+ u8 intvl_log; /* period : (250ms << intvl_log) */
+
+ seqcount_t seq;
u32 last_packets;
- unsigned long avpps;
u64 last_bytes;
+
+ u64 avpps;
u64 avbps;
- struct rcu_head e_rcu;
- struct rb_node node;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- struct rcu_head head;
-};
-struct gen_estimator_head {
- unsigned long next_jiffies;
- struct timer_list timer;
- struct list_head list;
+ unsigned long next_jiffies;
+ struct timer_list timer;
+ struct rcu_head rcu;
};
-static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
-
-/* Protects against NULL dereference */
-static DEFINE_RWLOCK(est_lock);
-
-/* Protects against soft lockup during large deletion */
-static struct rb_root est_root = RB_ROOT;
-static DEFINE_SPINLOCK(est_tree_lock);
-
-static void est_timer(unsigned long arg)
+static void est_fetch_counters(struct net_rate_estimator *e,
+ struct gnet_stats_basic_packed *b)
{
- int idx = (int)arg;
- struct gen_estimator *e;
+ if (e->stats_lock)
+ spin_lock(e->stats_lock);
- rcu_read_lock();
- list_for_each_entry_rcu(e, &elist[idx].list, list) {
- struct gnet_stats_basic_packed b = {0};
- unsigned long rate;
- u64 brate;
-
- if (e->stats_lock)
- spin_lock(e->stats_lock);
- read_lock(&est_lock);
- if (e->bstats == NULL)
- goto skip;
-
- __gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);
-
- brate = (b.bytes - e->last_bytes)<<(7 - idx);
- e->last_bytes = b.bytes;
- e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
- WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);
-
- rate = b.packets - e->last_packets;
- rate <<= (7 - idx);
- e->last_packets = b.packets;
- e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
- WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
-skip:
- read_unlock(&est_lock);
- if (e->stats_lock)
- spin_unlock(e->stats_lock);
- }
+ __gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);
- if (!list_empty(&elist[idx].list)) {
- elist[idx].next_jiffies += ((HZ/4) << idx);
+ if (e->stats_lock)
+ spin_unlock(e->stats_lock);
- if (unlikely(time_after_eq(jiffies, elist[idx].next_jiffies))) {
- /* Ouch... timer was delayed. */
- elist[idx].next_jiffies = jiffies + 1;
- }
- mod_timer(&elist[idx].timer, elist[idx].next_jiffies);
- }
- rcu_read_unlock();
}
-static void gen_add_node(struct gen_estimator *est)
+static void est_timer(unsigned long arg)
{
- struct rb_node **p = &est_root.rb_node, *parent = NULL;
+ struct net_rate_estimator *est = (struct net_rate_estimator *)arg;
+ struct gnet_stats_basic_packed b;
+ u64 rate, brate;
- while (*p) {
- struct gen_estimator *e;
+ est_fetch_counters(est, &b);
+ brate = (b.bytes - est->last_bytes) << (8 - est->ewma_log);
+ brate -= (est->avbps >> est->ewma_log);
- parent = *p;
- e = rb_entry(parent, struct gen_estimator, node);
+ rate = (u64)(b.packets - est->last_packets) << (8 - est->ewma_log);
+ rate -= (est->avpps >> est->ewma_log);
- if (est->bstats > e->bstats)
- p = &parent->rb_right;
- else
- p = &parent->rb_left;
- }
- rb_link_node(&est->node, parent, p);
- rb_insert_color(&est->node, &est_root);
-}
-
-static
-struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est64 *rate_est)
-{
- struct rb_node *p = est_root.rb_node;
+ write_seqcount_begin(&est->seq);
+ est->avbps += brate;
+ est->avpps += rate;
+ write_seqcount_end(&est->seq);
- while (p) {
- struct gen_estimator *e;
+ est->last_bytes = b.bytes;
+ est->last_packets = b.packets;
- e = rb_entry(p, struct gen_estimator, node);
+ est->next_jiffies += ((HZ/4) << est->intvl_log);
- if (bstats > e->bstats)
- p = p->rb_right;
- else if (bstats < e->bstats || rate_est != e->rate_est)
- p = p->rb_left;
- else
- return e;
+ if (unlikely(time_after_eq(jiffies, est->next_jiffies))) {
+ /* Ouch... timer was delayed. */
+ est->next_jiffies = jiffies + 1;
}
- return NULL;
+ mod_timer(&est->timer, est->next_jiffies);
}
/**
@@ -217,84 +126,76 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
*/
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
seqcount_t *running,
struct nlattr *opt)
{
- struct gen_estimator *est;
struct gnet_estimator *parm = nla_data(opt);
- struct gnet_stats_basic_packed b = {0};
- int idx;
+ struct net_rate_estimator *old, *est;
+ struct gnet_stats_basic_packed b;
+ int intvl_log;
if (nla_len(opt) < sizeof(*parm))
return -EINVAL;
+ /* allowed timer periods are:
+ * -2 : 250ms, -1 : 500ms, 0 : 1 sec
+ * 1 : 2 sec, 2 : 4 sec, 3 : 8 sec
+ */
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
est = kzalloc(sizeof(*est), GFP_KERNEL);
- if (est == NULL)
+ if (!est)
return -ENOBUFS;
- __gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);
-
- idx = parm->interval + 2;
+ seqcount_init(&est->seq);
+ intvl_log = parm->interval + 2;
est->bstats = bstats;
- est->rate_est = rate_est;
est->stats_lock = stats_lock;
est->running = running;
est->ewma_log = parm->ewma_log;
- est->last_bytes = b.bytes;
- est->avbps = rate_est->bps<<5;
- est->last_packets = b.packets;
- est->avpps = rate_est->pps<<10;
+ est->intvl_log = intvl_log;
est->cpu_bstats = cpu_bstats;
- spin_lock_bh(&est_tree_lock);
- if (!elist[idx].timer.function) {
- INIT_LIST_HEAD(&elist[idx].list);
- setup_timer(&elist[idx].timer, est_timer, idx);
+ est_fetch_counters(est, &b);
+ est->last_bytes = b.bytes;
+ est->last_packets = b.packets;
+ old = rcu_dereference_protected(*rate_est, 1);
+ if (old) {
+ del_timer_sync(&old->timer);
+ est->avbps = old->avbps;
+ est->avpps = old->avpps;
}
- if (list_empty(&elist[idx].list)) {
- elist[idx].next_jiffies = jiffies + ((HZ/4) << idx);
- mod_timer(&elist[idx].timer, elist[idx].next_jiffies);
- }
- list_add_rcu(&est->list, &elist[idx].list);
- gen_add_node(est);
- spin_unlock_bh(&est_tree_lock);
+ est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
+ setup_timer(&est->timer, est_timer, (unsigned long)est);
+ mod_timer(&est->timer, est->next_jiffies);
+ rcu_assign_pointer(*rate_est, est);
+ if (old)
+ kfree_rcu(old, rcu);
return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
/**
* gen_kill_estimator - remove a rate estimator
- * @bstats: basic statistics
- * @rate_est: rate estimator statistics
+ * @rate_est: rate estimator
*
- * Removes the rate estimator specified by &bstats and &rate_est.
+ * Removes the rate estimator.
*
- * Note : Caller should respect an RCU grace period before freeing stats_lock
*/
-void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est)
+void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
{
- struct gen_estimator *e;
-
- spin_lock_bh(&est_tree_lock);
- while ((e = gen_find_node(bstats, rate_est))) {
- rb_erase(&e->node, &est_root);
+ struct net_rate_estimator *est;
- write_lock(&est_lock);
- e->bstats = NULL;
- write_unlock(&est_lock);
-
- list_del_rcu(&e->list);
- kfree_rcu(e, e_rcu);
+ est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
+ if (est) {
+ del_timer_sync(&est->timer);
+ kfree_rcu(est, rcu);
}
- spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
@@ -314,33 +215,47 @@ EXPORT_SYMBOL(gen_kill_estimator);
*/
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
seqcount_t *running, struct nlattr *opt)
{
- gen_kill_estimator(bstats, rate_est);
- return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
+ return gen_new_estimator(bstats, cpu_bstats, rate_est,
+ stats_lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);
/**
* gen_estimator_active - test if estimator is currently in use
- * @bstats: basic statistics
- * @rate_est: rate estimator statistics
+ * @rate_est: rate estimator
*
* Returns true if estimator is active, and false if not.
*/
-bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est64 *rate_est)
+bool gen_estimator_active(struct net_rate_estimator __rcu **rate_est)
{
- bool res;
+ return !!rcu_access_pointer(*rate_est);
+}
+EXPORT_SYMBOL(gen_estimator_active);
- ASSERT_RTNL();
+bool gen_estimator_read(struct net_rate_estimator __rcu **rate_est,
+ struct gnet_stats_rate_est64 *sample)
+{
+ struct net_rate_estimator *est;
+ unsigned seq;
+
+ rcu_read_lock();
+ est = rcu_dereference(*rate_est);
+ if (!est) {
+ rcu_read_unlock();
+ return false;
+ }
- spin_lock_bh(&est_tree_lock);
- res = gen_find_node(bstats, rate_est) != NULL;
- spin_unlock_bh(&est_tree_lock);
+ do {
+ seq = read_seqcount_begin(&est->seq);
+ sample->bps = est->avbps >> 8;
+ sample->pps = est->avpps >> 8;
+ } while (read_seqcount_retry(&est->seq, seq));
- return res;
+ rcu_read_unlock();
+ return true;
}
-EXPORT_SYMBOL(gen_estimator_active);
+EXPORT_SYMBOL(gen_estimator_read);
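The rewritten estimator keeps avbps/avpps as fixed-point values scaled by 2^8 and applies the usual EWMA avg' = avg + (sample - avg) * W with W = 2^-ewma_log; gen_estimator_read() shifts right by 8 to recover the rate. A standalone sketch of the scaled update (the sample stream is made up):

#include <stdint.h>
#include <stdio.h>

#define SCALE_BITS 8	/* averages are kept scaled by 2^8 */

int main(void)
{
	uint64_t avg = 0;		/* average << SCALE_BITS */
	unsigned int ewma_log = 3;	/* weight W = 2^-3 = 1/8 */
	uint64_t samples[] = { 1000, 1000, 4000, 1000 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* avg += (sample << (8 - ewma_log)) - (avg >> ewma_log) */
		avg += (samples[i] << (SCALE_BITS - ewma_log))
		       - (avg >> ewma_log);
		printf("avg = %llu\n", (unsigned long long)(avg >> SCALE_BITS));
	}
	return 0;
}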
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 508e051304fb..87f28557b329 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -194,8 +194,7 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
/**
* gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
* @d: dumping handle
- * @b: basic statistics
- * @r: rate estimator statistics
+ * @rate_est: rate estimator
*
* Appends the rate estimator statistics to the top level TLV created by
* gnet_stats_start_copy().
@@ -205,18 +204,17 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
*/
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
- const struct gnet_stats_basic_packed *b,
- struct gnet_stats_rate_est64 *r)
+ struct net_rate_estimator __rcu **rate_est)
{
+ struct gnet_stats_rate_est64 sample;
struct gnet_stats_rate_est est;
int res;
- if (b && !gen_estimator_active(b, r))
+ if (!gen_estimator_read(rate_est, &sample))
return 0;
-
- est.bps = min_t(u64, UINT_MAX, r->bps);
+ est.bps = min_t(u64, UINT_MAX, sample.bps);
/* we have some time before reaching 2^32 packets per second */
- est.pps = r->pps;
+ est.pps = sample.pps;
if (d->compat_tc_stats) {
d->tc_stats.bps = est.bps;
@@ -226,11 +224,11 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
if (d->tail) {
res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
TCA_STATS_PAD);
- if (res < 0 || est.bps == r->bps)
+ if (res < 0 || est.bps == sample.bps)
return res;
/* emit 64bit stats only if needed */
- return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r),
- TCA_STATS_PAD);
+ return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
+ sizeof(sample), TCA_STATS_PAD);
}
return 0;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 4f6c1862dfd2..3202d75329b5 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1353,6 +1353,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
dcb_unlock:
spin_unlock_bh(&dcb_lock);
nla_put_failure:
+ err = -EMSGSIZE;
return err;
}
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 77d73014bde3..dc2960be51e0 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -286,9 +286,12 @@ int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0')
return -EINVAL; /* name should be null-terminated */
+ rc = -ENODEV;
dev = dev_get_by_name(genl_info_net(info), name);
if (!dev)
- return -ENODEV;
+ return rc;
+ if (dev->type != ARPHRD_IEEE802154)
+ goto out;
phy = dev->ieee802154_ptr->wpan_phy;
BUG_ON(!phy);
@@ -342,6 +345,7 @@ nla_put_failure:
nlmsg_free(msg);
out_dev:
wpan_phy_put(phy);
+out:
if (dev)
dev_put(dev);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 73a62700b00a..1b0e7d1f5217 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -808,6 +808,13 @@ static unsigned char update_suffix(struct key_vector *tn)
{
unsigned char slen = tn->pos;
unsigned long stride, i;
+ unsigned char slen_max;
+
+ /* only vector 0 can have a suffix length greater than or equal to
+ * tn->pos + tn->bits; the second highest node will have a suffix
+ * length of at most tn->pos + tn->bits - 1
+ */
+ slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen);
/* search though the list of children looking for nodes that might
* have a suffix greater than the one we currently have. This is
@@ -825,12 +832,8 @@ static unsigned char update_suffix(struct key_vector *tn)
slen = n->slen;
i &= ~(stride - 1);
- /* if slen covers all but the last bit we can stop here
- * there will be nothing longer than that since only node
- * 0 and 1 << (bits - 1) could have that as their suffix
- * length.
- */
- if ((slen + 1) >= (tn->pos + tn->bits))
+ /* stop searching if we have hit the maximum possible value */
+ if (slen >= slen_max)
break;
}
@@ -1002,39 +1005,27 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn)
return collapse(t, tn);
/* update parent in case halve failed */
- tp = node_parent(tn);
-
- /* Return if at least one deflate was run */
- if (max_work != MAX_WORK)
- return tp;
-
- /* push the suffix length to the parent node */
- if (tn->slen > tn->pos) {
- unsigned char slen = update_suffix(tn);
-
- if (slen > tp->slen)
- tp->slen = slen;
- }
-
- return tp;
+ return node_parent(tn);
}
-static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l)
+static void node_pull_suffix(struct key_vector *tn, unsigned char slen)
{
- while ((tp->slen > tp->pos) && (tp->slen > l->slen)) {
- if (update_suffix(tp) > l->slen)
+ unsigned char node_slen = tn->slen;
+
+ while ((node_slen > tn->pos) && (node_slen > slen)) {
+ slen = update_suffix(tn);
+ if (node_slen == slen)
break;
- tp = node_parent(tp);
+
+ tn = node_parent(tn);
+ node_slen = tn->slen;
}
}
-static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l)
+static void node_push_suffix(struct key_vector *tn, unsigned char slen)
{
- /* if this is a new leaf then tn will be NULL and we can sort
- * out parent suffix lengths as a part of trie_rebalance
- */
- while (tn->slen < l->slen) {
- tn->slen = l->slen;
+ while (tn->slen < slen) {
+ tn->slen = slen;
tn = node_parent(tn);
}
}
@@ -1155,6 +1146,7 @@ static int fib_insert_node(struct trie *t, struct key_vector *tp,
}
/* Case 3: n is NULL, and will just insert a new leaf */
+ node_push_suffix(tp, new->fa_slen);
NODE_INIT_PARENT(l, tp);
put_child_root(tp, key, l);
trie_rebalance(t, tp);
@@ -1196,7 +1188,7 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp,
/* if we added to the tail node then we need to update slen */
if (l->slen < new->fa_slen) {
l->slen = new->fa_slen;
- leaf_push_suffix(tp, l);
+ node_push_suffix(tp, new->fa_slen);
}
return 0;
@@ -1588,6 +1580,8 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp,
* out parent suffix lengths as a part of trie_rebalance
*/
if (hlist_empty(&l->leaf)) {
+ if (tp->slen == l->slen)
+ node_pull_suffix(tp, tp->pos);
put_child_root(tp, l->key, NULL);
node_free(l);
trie_rebalance(t, tp);
@@ -1600,7 +1594,7 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp,
/* update the trie with the latest suffix length */
l->slen = fa->fa_slen;
- leaf_pull_suffix(tp, l);
+ node_pull_suffix(tp, fa->fa_slen);
}
/* Caller must hold RTNL. */
@@ -1872,6 +1866,10 @@ void fib_table_flush_external(struct fib_table *tb)
if (IS_TRIE(pn))
break;
+ /* update the suffix to address pulled leaves */
+ if (pn->slen > pn->pos)
+ update_suffix(pn);
+
/* resize completed node */
pn = resize(t, pn);
cindex = get_index(pkey, pn);
@@ -1938,6 +1936,10 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
if (IS_TRIE(pn))
break;
+ /* update the suffix to address pulled leaves */
+ if (pn->slen > pn->pos)
+ update_suffix(pn);
+
/* resize completed node */
pn = resize(t, pn);
cindex = get_index(pkey, pn);
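node_push_suffix() propagates a grown suffix length toward the root, stopping at the first ancestor that already covers it, while node_pull_suffix() recomputes on the way up after a shrink. A sketch of the push direction (the struct shape is illustrative, not the kernel's key_vector):

#include <stdio.h>

struct node {
	unsigned char slen;	/* longest suffix seen below this node */
	struct node *parent;
};

/* Grow ancestors' suffix length until one already covers 'slen'. */
static void push_suffix(struct node *tn, unsigned char slen)
{
	while (tn && tn->slen < slen) {
		tn->slen = slen;
		tn = tn->parent;
	}
}

int main(void)
{
	struct node root = { 8, NULL }, mid = { 4, &root }, leaf = { 2, &mid };

	push_suffix(&leaf, 6);	/* leaf and mid grow to 6; root stays 8 */
	printf("leaf=%d mid=%d root=%d\n", leaf.slen, mid.slen, root.slen);
	return 0;
}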
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index d11129f1178d..5b2635e69a92 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -657,6 +657,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
if (len > 0xFFFF)
return -EMSGSIZE;
+ /* Must have at least a full ICMP header. */
+ if (len < icmph_len)
+ return -EINVAL;
+
/*
* Check the flags.
*/
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1149b48700a1..1ef3165114ba 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -663,9 +663,9 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
if (tcp_should_autocork(sk, skb, size_goal)) {
/* avoid atomic op if TSQ_THROTTLED bit is already set */
- if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
+ if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
- set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+ set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
}
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED.
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index bde22ebb92a8..5f5e5936760e 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -188,8 +188,8 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
+ const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
- struct tcp_sock *tp = tcp_sk(sk);
u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
/* If ack did not advance snd_una, count dupack as MSS size.
@@ -229,13 +229,6 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
WRITE_ONCE(ca->dctcp_alpha, alpha);
dctcp_reset(tp, ca);
}
-
- if (flags & CA_ACK_ECE) {
- unsigned int cwnd = dctcp_ssthresh(sk);
-
- if (cwnd != tp->snd_cwnd)
- tp->snd_cwnd = cwnd;
- }
}
static void dctcp_state(struct sock *sk, u8 new_state)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fe668c1b9ced..6c790754ae3e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -129,6 +129,23 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define REXMIT_LOST 1 /* retransmit packets marked lost */
#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+{
+ static bool __once __read_mostly;
+
+ if (!__once) {
+ struct net_device *dev;
+
+ __once = true;
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
+ pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+ dev ? dev->name : "Unknown driver");
+ rcu_read_unlock();
+ }
+}
+
/* Adapt the MSS value used to make delayed ack decision to the
* real world.
*/
@@ -145,7 +162,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
*/
len = skb_shinfo(skb)->gso_size ? : skb->len;
if (len >= icsk->icsk_ack.rcv_mss) {
- icsk->icsk_ack.rcv_mss = len;
+ icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
+ tcp_sk(sk)->advmss);
+ if (unlikely(icsk->icsk_ack.rcv_mss != len))
+ tcp_gro_dev_warn(sk, skb);
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
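tcp_gro_dev_warn() uses a latch-once static so the message fires at most once per boot, and rcv_mss is now clamped to advmss so a driver's oversized GRO aggregates cannot inflate the delayed-ACK MSS estimate. A userspace sketch of the once-only pattern (deliberately not thread-safe, matching the relaxed kernel version):

#include <stdbool.h>
#include <stdio.h>

static void warn_once(const char *name)
{
	static bool once;	/* latches after the first call */

	if (!once) {
		once = true;
		fprintf(stderr, "%s: suspect GRO implementation\n", name);
	}
}

int main(void)
{
	warn_once("eth0");
	warn_once("eth0");	/* silent the second time */
	return 0;
}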
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b50f05905ced..30d81f533ada 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -443,7 +443,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (!sock_owned_by_user(sk)) {
tcp_v4_mtu_reduced(sk);
} else {
- if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+ if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
sock_hold(sk);
}
goto out;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c7adcb57654e..b45101f3d2bd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -769,25 +769,26 @@ static void tcp_tasklet_func(unsigned long data)
list_del(&tp->tsq_node);
sk = (struct sock *)tp;
- bh_lock_sock(sk);
-
- if (!sock_owned_by_user(sk)) {
- tcp_tsq_handler(sk);
- } else {
- /* defer the work to tcp_release_cb() */
- set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+ clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
+
+ if (!sk->sk_lock.owned &&
+ test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
+ tcp_tsq_handler(sk);
+ }
+ bh_unlock_sock(sk);
}
- bh_unlock_sock(sk);
- clear_bit(TSQ_QUEUED, &tp->tsq_flags);
sk_free(sk);
}
}
-#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \
- (1UL << TCP_WRITE_TIMER_DEFERRED) | \
- (1UL << TCP_DELACK_TIMER_DEFERRED) | \
- (1UL << TCP_MTU_REDUCED_DEFERRED))
+#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \
+ TCPF_WRITE_TIMER_DEFERRED | \
+ TCPF_DELACK_TIMER_DEFERRED | \
+ TCPF_MTU_REDUCED_DEFERRED)
/**
* tcp_release_cb - tcp release_sock() callback
* @sk: socket
@@ -797,18 +798,17 @@ static void tcp_tasklet_func(unsigned long data)
*/
void tcp_release_cb(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
unsigned long flags, nflags;
/* perform an atomic operation only if at least one flag is set */
do {
- flags = tp->tsq_flags;
+ flags = sk->sk_tsq_flags;
if (!(flags & TCP_DEFERRED_ALL))
return;
nflags = flags & ~TCP_DEFERRED_ALL;
- } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+ } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
- if (flags & (1UL << TCP_TSQ_DEFERRED))
+ if (flags & TCPF_TSQ_DEFERRED)
tcp_tsq_handler(sk);
/* Here begins the tricky part :
@@ -822,15 +822,15 @@ void tcp_release_cb(struct sock *sk)
*/
sock_release_ownership(sk);
- if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
+ if (flags & TCPF_WRITE_TIMER_DEFERRED) {
tcp_write_timer_handler(sk);
__sock_put(sk);
}
- if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
+ if (flags & TCPF_DELACK_TIMER_DEFERRED) {
tcp_delack_timer_handler(sk);
__sock_put(sk);
}
- if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
+ if (flags & TCPF_MTU_REDUCED_DEFERRED) {
inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
__sock_put(sk);
}
@@ -860,6 +860,7 @@ void tcp_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long flags, nval, oval;
int wmem;
/* Keep one reference on sk_wmem_alloc.
@@ -877,16 +878,25 @@ void tcp_wfree(struct sk_buff *skb)
if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
goto out;
- if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
- !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
- unsigned long flags;
+ for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
struct tsq_tasklet *tsq;
+ bool empty;
+
+ if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
+ goto out;
+
+ nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
+ nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
+ if (nval != oval)
+ continue;
/* queue this socket to tasklet queue */
local_irq_save(flags);
tsq = this_cpu_ptr(&tsq_tasklet);
+ empty = list_empty(&tsq->head);
list_add(&tp->tsq_node, &tsq->head);
- tasklet_schedule(&tsq->tasklet);
+ if (empty)
+ tasklet_schedule(&tsq->tasklet);
local_irq_restore(flags);
return;
}
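tcp_wfree() now moves the socket from THROTTLED to QUEUED with a lock-free cmpxchg() retry loop, so losing a race simply re-reads the flags and tries again. A userspace sketch of the same transition (the GCC/Clang __sync builtin stands in for the kernel's cmpxchg(); flag values are illustrative):

#include <stdio.h>

#define THROTTLED	(1UL << 0)
#define QUEUED		(1UL << 1)
#define TSQ_DEFERRED	(1UL << 2)

static unsigned long tsq_flags = THROTTLED;

/* Returns 1 if we won the transition and should queue the socket. */
static int try_queue(void)
{
	unsigned long oval, nval;

	for (oval = tsq_flags;; oval = nval) {
		if (!(oval & THROTTLED) || (oval & QUEUED))
			return 0;	/* nothing to do / already queued */

		nval = (oval & ~THROTTLED) | QUEUED | TSQ_DEFERRED;
		nval = __sync_val_compare_and_swap(&tsq_flags, oval, nval);
		if (nval == oval)
			return 1;	/* swap succeeded */
	}
}

int main(void)
{
	printf("queued=%d flags=%#lx\n", try_queue(), tsq_flags);
	return 0;
}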
@@ -1922,26 +1932,26 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
*/
static int tcp_mtu_probe(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb, *nskb, *next;
struct net *net = sock_net(sk);
- int len;
int probe_size;
int size_needed;
- int copy;
+ int copy, len;
int mss_now;
int interval;
/* Not currently probing/verifying,
* not in recovery,
* have enough cwnd, and
- * not SACKing (the variable headers throw things off) */
- if (!icsk->icsk_mtup.enabled ||
- icsk->icsk_mtup.probe_size ||
- inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
- tp->snd_cwnd < 11 ||
- tp->rx_opt.num_sacks || tp->rx_opt.dsack)
+ * not SACKing (the variable headers throw things off)
+ */
+ if (likely(!icsk->icsk_mtup.enabled ||
+ icsk->icsk_mtup.probe_size ||
+ inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
+ tp->snd_cwnd < 11 ||
+ tp->rx_opt.num_sacks || tp->rx_opt.dsack))
return -1;
/* Use binary search for probe_size between tcp_mss_base,
@@ -2081,7 +2091,16 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit <<= factor;
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
- set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+ /* Always send the 1st or 2nd skb in write queue.
+ * No need to wait for TX completion to call us back,
+ * after softirq/tasklet schedule.
+ * This helps when TX completions are delayed too much.
+ */
+ if (skb == sk->sk_write_queue.next ||
+ skb->prev == sk->sk_write_queue.next)
+ return false;
+
+ set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
@@ -2222,6 +2241,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
+ if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
+ clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
if (tcp_small_queue_check(sk, skb, 0))
break;
@@ -3524,8 +3545,6 @@ void tcp_send_ack(struct sock *sk)
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
- * We also avoid tcp_wfree() overhead (cache line miss accessing
- * tp->tsq_flags) by using regular sock_wfree()
*/
skb_set_tcp_pure_ack(buff);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 3ea1cf804748..3705075f42c3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -310,7 +310,7 @@ static void tcp_delack_timer(unsigned long data)
inet_csk(sk)->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* delegate our work to tcp_release_cb() */
- if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+ if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
sock_hold(sk);
}
bh_unlock_sock(sk);
@@ -592,7 +592,7 @@ static void tcp_write_timer(unsigned long data)
tcp_write_timer_handler(sk);
} else {
/* delegate our work to tcp_release_cb() */
- if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+ if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
sock_hold(sk);
}
bh_unlock_sock(sk);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index aac7818e2e0f..2413a0637d99 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2000,8 +2000,11 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
It is very good, but in some (rare!) circumstances
(SIT, PtP, NBMA NOARP links) it is handy to allow
some exceptions. --ANK
+ We allow IPv4-mapped nexthops to support RFC4798-type
+ addressing.
*/
- if (!(gwa_type & IPV6_ADDR_UNICAST))
+ if (!(gwa_type & (IPV6_ADDR_UNICAST |
+ IPV6_ADDR_MAPPED)))
goto out;
if (cfg->fc_table) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a2185a214abc..73bc8fc68acd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -399,7 +399,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sock_owned_by_user(sk))
tcp_v6_mtu_reduced(sk);
else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
- &tp->tsq_flags))
+ &sk->sk_tsq_flags))
sock_hold(sk);
goto out;
}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 0e4334cbde17..15fe97644ffe 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1252,7 +1252,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!nla)
continue;
- switch(index) {
+ switch (index) {
case RTA_OIF:
cfg->rc_ifindex = nla_get_u32(nla);
break;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index dbd6c4a12b97..91a373a3f534 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -63,7 +63,7 @@ void xt_rateest_put(struct xt_rateest *est)
mutex_lock(&xt_rateest_mutex);
if (--est->refcnt == 0) {
hlist_del(&est->list);
- gen_kill_estimator(&est->bstats, &est->rstats);
+ gen_kill_estimator(&est->rate_est);
/*
* gen_estimator est_timer() might access est->lock or bstats,
* wait a RCU grace period before freeing 'est'
@@ -132,7 +132,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
cfg.est.interval = info->interval;
cfg.est.ewma_log = info->ewma_log;
- ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
+ ret = gen_new_estimator(&est->bstats, NULL, &est->rate_est,
&est->lock, NULL, &cfg.opt);
if (ret < 0)
goto err2;
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 7720b036d76a..1db02f6fca54 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -18,35 +18,33 @@ static bool
xt_rateest_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rateest_match_info *info = par->matchinfo;
- struct gnet_stats_rate_est64 *r;
+ struct gnet_stats_rate_est64 sample = {0};
u_int32_t bps1, bps2, pps1, pps2;
bool ret = true;
- spin_lock_bh(&info->est1->lock);
- r = &info->est1->rstats;
+ gen_estimator_read(&info->est1->rate_est, &sample);
+
if (info->flags & XT_RATEEST_MATCH_DELTA) {
- bps1 = info->bps1 >= r->bps ? info->bps1 - r->bps : 0;
- pps1 = info->pps1 >= r->pps ? info->pps1 - r->pps : 0;
+ bps1 = info->bps1 >= sample.bps ? info->bps1 - sample.bps : 0;
+ pps1 = info->pps1 >= sample.pps ? info->pps1 - sample.pps : 0;
} else {
- bps1 = r->bps;
- pps1 = r->pps;
+ bps1 = sample.bps;
+ pps1 = sample.pps;
}
- spin_unlock_bh(&info->est1->lock);
if (info->flags & XT_RATEEST_MATCH_ABS) {
bps2 = info->bps2;
pps2 = info->pps2;
} else {
- spin_lock_bh(&info->est2->lock);
- r = &info->est2->rstats;
+ gen_estimator_read(&info->est2->rate_est, &sample);
+
if (info->flags & XT_RATEEST_MATCH_DELTA) {
- bps2 = info->bps2 >= r->bps ? info->bps2 - r->bps : 0;
- pps2 = info->pps2 >= r->pps ? info->pps2 - r->pps : 0;
+ bps2 = info->bps2 >= sample.bps ? info->bps2 - sample.bps : 0;
+ pps2 = info->pps2 >= sample.pps ? info->pps2 - sample.pps : 0;
} else {
- bps2 = r->bps;
- pps2 = r->pps;
+ bps2 = sample.bps;
+ pps2 = sample.pps;
}
- spin_unlock_bh(&info->est2->lock);
}
switch (info->mode) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 602e5ebe9db3..246f29d365c0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -322,11 +322,13 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
-static void __netlink_sock_destruct(struct sock *sk)
+static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->cb_running) {
+ if (nlk->cb.done)
+ nlk->cb.done(&nlk->cb);
module_put(nlk->cb.module);
kfree_skb(nlk->cb.skb);
}
@@ -348,21 +350,7 @@ static void netlink_sock_destruct_work(struct work_struct *work)
struct netlink_sock *nlk = container_of(work, struct netlink_sock,
work);
- nlk->cb.done(&nlk->cb);
- __netlink_sock_destruct(&nlk->sk);
-}
-
-static void netlink_sock_destruct(struct sock *sk)
-{
- struct netlink_sock *nlk = nlk_sk(sk);
-
- if (nlk->cb_running && nlk->cb.done) {
- INIT_WORK(&nlk->work, netlink_sock_destruct_work);
- schedule_work(&nlk->work);
- return;
- }
-
- __netlink_sock_destruct(sk);
+ sk_free(&nlk->sk);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
@@ -667,8 +655,18 @@ out_module:
static void deferred_put_nlk_sk(struct rcu_head *head)
{
struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+ struct sock *sk = &nlk->sk;
+
+ if (!atomic_dec_and_test(&sk->sk_refcnt))
+ return;
+
+ if (nlk->cb_running && nlk->cb.done) {
+ INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+ schedule_work(&nlk->work);
+ return;
+ }
- sock_put(&nlk->sk);
+ sk_free(sk);
}
static int netlink_release(struct socket *sock)
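The reordering above makes deferred_put_nlk_sk() the place where the last reference drops: if a dump's ->done() hook is still pending, teardown is bounced to a workqueue so done() never runs from RCU callback context. A rough userspace sketch of that decision (the synchronous stand-in for schedule_work() and all names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct nl_sock {
	int refcnt;
	bool cb_running;
	void (*cb_done)(struct nl_sock *);
};

/* Worker-context teardown: a safe place to run the done() hook. */
static void destruct_work(struct nl_sock *sk)
{
	if (sk->cb_done)
		sk->cb_done(sk);
	free(sk);
}

static void deferred_put(struct nl_sock *sk)
{
	if (--sk->refcnt)
		return;				/* not the last reference */

	if (sk->cb_running && sk->cb_done) {
		destruct_work(sk);		/* schedule_work() stand-in */
		return;
	}
	free(sk);
}

static void example_done(struct nl_sock *sk)
{
	(void)sk;
	puts("done() ran during teardown");
}

int main(void)
{
	struct nl_sock *sk = calloc(1, sizeof(*sk));

	sk->refcnt = 1;
	sk->cb_running = true;
	sk->cb_done = example_done;
	deferred_put(sk);
	return 0;
}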
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f893d180da1c..2095c83ce773 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -41,8 +41,7 @@ static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *p)
spin_lock_bh(&hinfo->lock);
hlist_del(&p->tcfa_head);
spin_unlock_bh(&hinfo->lock);
- gen_kill_estimator(&p->tcfa_bstats,
- &p->tcfa_rate_est);
+ gen_kill_estimator(&p->tcfa_rate_est);
/*
* gen_estimator est_timer() might access p->tcfa_lock
* or bstats, wait a RCU grace period before freeing p
@@ -237,8 +236,7 @@ EXPORT_SYMBOL(tcf_hash_check);
void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
{
if (est)
- gen_kill_estimator(&a->tcfa_bstats,
- &a->tcfa_rate_est);
+ gen_kill_estimator(&a->tcfa_rate_est);
call_rcu(&a->tcfa_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_cleanup);
@@ -670,8 +668,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
goto errout;
if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
- gnet_stats_copy_rate_est(&d, &p->tcfa_bstats,
- &p->tcfa_rate_est) < 0 ||
+ gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
gnet_stats_copy_queue(&d, p->cpu_qstats,
&p->tcfa_qstats,
p->tcfa_qstats.qlen) < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 84c1d2da4f8b..1c60317f0121 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -117,10 +117,19 @@ static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
struct sk_buff *skb)
{
+ struct nlattr *nla;
+
if (prog->bpf_name &&
nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
+ nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST,
+ sizeof(prog->filter->digest));
+ if (nla == NULL)
+ return -EMSGSIZE;
+
+ memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+
return 0;
}
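Both dump paths use the reserve-then-fill netlink idiom: nla_reserve() leaves room for a fixed-size attribute in the message and the digest is memcpy()ed into it afterwards. A userspace sketch of the same TLV pattern (the header layout and type value are illustrative, not the real netlink wire format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t type;
	uint16_t len;		/* header plus payload, in bytes */
	unsigned char data[];
};

/* Reserve space for a TLV in 'buf' at *off and advance the offset. */
static struct tlv *tlv_reserve(unsigned char *buf, size_t *off,
			       uint16_t type, uint16_t payload)
{
	struct tlv *a = (struct tlv *)(buf + *off);

	a->type = type;
	a->len = sizeof(*a) + payload;
	*off += a->len;
	return a;
}

int main(void)
{
	unsigned char msg[128];
	unsigned char digest[20] = { 0xaa, 0xbb };	/* placeholder bytes */
	size_t off = 0;
	struct tlv *a = tlv_reserve(msg, &off, 1 /* "DIGEST" */,
				    sizeof(digest));

	memcpy(a->data, digest, sizeof(digest));	/* fill after reserving */
	printf("attr type=%u len=%u, message now %zu bytes\n",
	       (unsigned)a->type, (unsigned)a->len, off);
	return 0;
}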
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index c990b73a6c85..0ba91d1ce994 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -142,8 +142,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
goto failure_unlock;
} else if (tb[TCA_POLICE_AVRATE] &&
(ret == ACT_P_CREATED ||
- !gen_estimator_active(&police->tcf_bstats,
- &police->tcf_rate_est))) {
+ !gen_estimator_active(&police->tcf_rate_est))) {
err = -EINVAL;
goto failure_unlock;
}
@@ -216,13 +215,17 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
bstats_update(&police->tcf_bstats, skb);
tcf_lastuse_update(&police->tcf_tm);
- if (police->tcfp_ewma_rate &&
- police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
- police->tcf_qstats.overlimits++;
- if (police->tcf_action == TC_ACT_SHOT)
- police->tcf_qstats.drops++;
- spin_unlock(&police->tcf_lock);
- return police->tcf_action;
+ if (police->tcfp_ewma_rate) {
+ struct gnet_stats_rate_est64 sample;
+
+ if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
+ sample.bps >= police->tcfp_ewma_rate) {
+ police->tcf_qstats.overlimits++;
+ if (police->tcf_action == TC_ACT_SHOT)
+ police->tcf_qstats.drops++;
+ spin_unlock(&police->tcf_lock);
+ return police->tcf_action;
+ }
}
if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c37aa8b77fb5..adc776048d1a 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -241,7 +241,7 @@ static int cls_bpf_init(struct tcf_proto *tp)
return 0;
}
-static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
tcf_exts_destroy(&prog->exts);
@@ -255,22 +255,22 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
kfree(prog);
}
-static void __cls_bpf_delete_prog(struct rcu_head *rcu)
+static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
- struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
-
- cls_bpf_delete_prog(prog->tp, prog);
+ __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}
-static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
+static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
- struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;
-
cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
- call_rcu(&prog->rcu, __cls_bpf_delete_prog);
+ call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
+}
+static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
+{
+ __cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
return 0;
}
@@ -282,12 +282,8 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
if (!force && !list_empty(&head->plist))
return false;
- list_for_each_entry_safe(prog, tmp, &head->plist, link) {
- cls_bpf_stop_offload(tp, prog);
- list_del_rcu(&prog->link);
- tcf_unbind_filter(tp, &prog->res);
- call_rcu(&prog->rcu, __cls_bpf_delete_prog);
- }
+ list_for_each_entry_safe(prog, tmp, &head->plist, link)
+ __cls_bpf_delete(tp, prog);
kfree_rcu(head, rcu);
return true;
@@ -511,14 +507,14 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
ret = cls_bpf_offload(tp, prog, oldprog);
if (ret) {
- cls_bpf_delete_prog(tp, prog);
+ __cls_bpf_delete_prog(prog);
return ret;
}
if (oldprog) {
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
- call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
+ call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
} else {
list_add_rcu(&prog->link, &head->plist);
}
@@ -553,10 +549,18 @@ static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
struct sk_buff *skb)
{
+ struct nlattr *nla;
+
if (prog->bpf_name &&
nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
+ nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest));
+ if (nla == NULL)
+ return -EMSGSIZE;
+
+ memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+
return 0;
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c5cea78491dc..29a9e6d9f274 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -236,8 +236,11 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
int err;
if (!tc_can_offload(dev, tp)) {
- if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev))
+ if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
+ (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
+ f->hw_dev = dev;
return tc_skip_sw(f->flags) ? -EINVAL : 0;
+ }
dev = f->hw_dev;
tc->egress_dev = true;
} else {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f337f1bdd1d4..d7b93429f0cc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1395,7 +1395,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
&d, cpu_bstats, &q->bstats) < 0 ||
- gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
goto nla_put_failure;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index beb554aa8cfb..9ffe1c220b02 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -122,7 +122,7 @@ struct cbq_class {
psched_time_t penalized;
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est64 rate_est;
+ struct net_rate_estimator __rcu *rate_est;
struct tc_cbq_xstats xstats;
struct tcf_proto __rcu *filter_list;
@@ -1346,7 +1346,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
return -1;
@@ -1405,7 +1405,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
tcf_destroy_chain(&cl->filter_list);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->rate_est);
if (cl != &q->link)
kfree(cl);
}
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 8af5c59eef84..bb4cbdf75004 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -25,7 +25,7 @@ struct drr_class {
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est64 rate_est;
+ struct net_rate_estimator __rcu *rate_est;
struct list_head alist;
struct Qdisc *qdisc;
@@ -142,7 +142,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->rate_est);
qdisc_destroy(cl->qdisc);
kfree(cl);
}
@@ -283,7 +283,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
return -1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6cfb6e9038c2..6eb9c8e88519 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -709,7 +709,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
- gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+ gen_kill_estimator(&qdisc->rate_est);
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 000f1d36128e..3ffaa6fb0990 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -114,7 +114,7 @@ struct hfsc_class {
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est64 rate_est;
+ struct net_rate_estimator __rcu *rate_est;
struct tcf_proto __rcu *filter_list; /* filter list */
unsigned int filter_cnt; /* filter count */
unsigned int level; /* class level in hierarchy */
@@ -1091,7 +1091,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
tcf_destroy_chain(&cl->filter_list);
qdisc_destroy(cl->qdisc);
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->rate_est);
if (cl != &q->root)
kfree(cl);
}
@@ -1348,7 +1348,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.rtwork = cl->cl_cumul;
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
return -1;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9926fe4f3b6f..760f39e7caee 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -111,7 +111,7 @@ struct htb_class {
unsigned int children;
struct htb_class *parent; /* parent class */
- struct gnet_stats_rate_est64 rate_est;
+ struct net_rate_estimator __rcu *rate_est;
/*
* Written often fields
@@ -1145,7 +1145,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
return -1;
@@ -1228,7 +1228,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
WARN_ON(!cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->rate_est);
tcf_destroy_chain(&cl->filter_list);
kfree(cl);
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index ca0516e6f743..f9e712ce2d15 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -137,7 +137,7 @@ struct qfq_class {
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est64 rate_est;
+ struct net_rate_estimator __rcu *rate_est;
struct Qdisc *qdisc;
struct list_head alist; /* Link for active-classes list. */
struct qfq_aggregate *agg; /* Parent aggregate. */
@@ -508,7 +508,7 @@ set_change_agg:
new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
if (new_agg == NULL) {
err = -ENOBUFS;
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->rate_est);
goto destroy_class;
}
sch_tree_lock(sch);
@@ -533,7 +533,7 @@ static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
struct qfq_sched *q = qdisc_priv(sch);
qfq_rm_from_agg(q, cl);
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->rate_est);
qdisc_destroy(cl->qdisc);
kfree(cl);
}
@@ -667,7 +667,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL,
&cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
return -1;
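The net/sched hunks above all apply one API migration: each class used to embed a struct gnet_stats_rate_est64 next to its bstats, and the estimator helpers took both; with the estimator state now behind an RCU-managed struct net_rate_estimator pointer, teardown and dump need only the pointer itself. A minimal sketch of the new calling convention follows — the gen_kill_estimator() and gnet_stats_copy_rate_est() signatures are taken from the hunks, while everything named my_* is illustrative, not from any scheduler:

#include <net/gen_stats.h>

struct my_class {
	struct gnet_stats_basic_packed bstats;		/* byte/packet counters, unchanged */
	struct net_rate_estimator __rcu *rate_est;	/* was: struct gnet_stats_rate_est64 */
};

/* Teardown: the estimator is located via the RCU pointer alone,
 * so the bstats argument is gone. */
static void my_class_destroy(struct my_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
}

/* Dump: the copy helper likewise drops bstats and reads the
 * estimated rate from the estimator object itself. */
static int my_class_dump_stats(struct gnet_dump *d, struct my_class *cl)
{
	return gnet_stats_copy_rate_est(d, &cl->rate_est);
}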
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 3fe4468ea2c5..52063b262667 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -1702,7 +1702,7 @@ interrupts are disabled.
static void xmit_descs(struct snd_dbri *dbri)
{
struct dbri_streaminfo *info;
- u32 dvma_addr = (u32)dbri->dma_dvma;
+ u32 dvma_addr;
s32 *cmd;
unsigned long flags;
int first_td;
@@ -1710,6 +1710,7 @@ static void xmit_descs(struct snd_dbri *dbri)
if (dbri == NULL)
return; /* Disabled */
+ dvma_addr = (u32)dbri->dma_dvma;
info = &dbri->stream_info[DBRI_REC];
spin_lock_irqsave(&dbri->lock, flags);
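The dbri change is an ordering fix: xmit_descs() loaded dbri->dma_dvma in its declarations, before the dbri == NULL guard, so a disabled device was dereferenced anyway. The two hunks defer that load until after the check. A minimal sketch of the hazard and the fix, with illustrative names (my_dev, cookie, and use() are not from the driver):

/* Before: the initializer runs first, so a NULL dev is dereferenced
 * even though the guard below would have bailed out. */
static void xmit_broken(struct my_dev *dev)
{
	u32 cookie = dev->cookie;	/* NULL dereference if dev == NULL */

	if (dev == NULL)
		return;
	use(cookie);
}

/* After: check first, load second, as in the hunks above. */
static void xmit_fixed(struct my_dev *dev)
{
	u32 cookie;

	if (dev == NULL)
		return;
	cookie = dev->cookie;		/* safe: dev is known non-NULL here */
	use(cookie);
}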
diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh
index 8e960234013d..4aa5369ffa4e 100755
--- a/tools/hv/bondvf.sh
+++ b/tools/hv/bondvf.sh
@@ -74,8 +74,8 @@ function create_eth_cfg_redhat {
echo DEVICE=$1 >>$fn
echo TYPE=Ethernet >>$fn
echo BOOTPROTO=none >>$fn
+ echo UUID=`uuidgen` >>$fn
echo ONBOOT=yes >>$fn
- echo NM_CONTROLLED=no >>$fn
echo PEERDNS=yes >>$fn
echo IPV6INIT=yes >>$fn
echo MASTER=$2 >>$fn
@@ -93,8 +93,8 @@ function create_bond_cfg_redhat {
echo DEVICE=$1 >>$fn
echo TYPE=Bond >>$fn
echo BOOTPROTO=dhcp >>$fn
+ echo UUID=`uuidgen` >>$fn
echo ONBOOT=yes >>$fn
- echo NM_CONTROLLED=no >>$fn
echo PEERDNS=yes >>$fn
echo IPV6INIT=yes >>$fn
echo BONDING_MASTER=yes >>$fn
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 3c59f96e3ed8..071431bedde8 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,2 +1,3 @@
test_verifier
test_maps
+test_lru_map
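The test_verifier additions below exercise two verifier properties: that BPF_OR on tracked constants keeps the result a known constant for stack-bounds checking (so 34|13 = 47 bytes fits a 48-byte buffer while 34|24 = 58 does not), and that LWT programs get direct packet reads at LWT_IN, LWT_OUT, and LWT_XMIT but direct writes only at LWT_XMIT. The LWT cases all hand-encode the same bounds-check idiom; a hedged C-level sketch of what that bytecode corresponds to (the function and variable names are illustrative):

#include <linux/bpf.h>

int lwt_pkt_access(struct __sk_buff *skb)
{
	unsigned char *data = (void *)(long)skb->data;
	unsigned char *data_end = (void *)(long)skb->data_end;

	/* The BPF_JGT instruction: prove data..data+8 is in bounds
	 * before any packet access. */
	if (data + 8 > data_end)
		return 0;

	/* BPF_LDX_MEM: a one-byte read, accepted for all three LWT types. */
	unsigned char b = data[0];

	/* BPF_STX_MEM: a one-byte write, rejected with "cannot write
	 * into packet" everywhere except LWT_XMIT. */
	data[0] = b;
	return 0;
}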
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 5da2e9d7689c..0103bf2e0c0d 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2683,6 +2683,200 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
},
+ {
+ "constant register |= constant should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "constant register |= constant should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "constant register |= constant register should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 13),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "constant register |= constant register should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 24),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "invalid direct packet write for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+ },
+ {
+ "invalid direct packet write for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+ },
+ {
+ "direct packet write for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+ },
+ {
+ "direct packet read for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+ },
+ {
+ "direct packet read for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+ },
+ {
+ "direct packet read for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+ },
+ {
+ "invalid access of tc_classid for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ },
+ {
+ "invalid access of tc_classid for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ },
+ {
+ "invalid access of tc_classid for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)