-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt5
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt4
-rw-r--r--Documentation/devicetree/bindings/net/keystone-netcp.txt6
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt75
-rw-r--r--Documentation/networking/ip-sysctl.txt40
-rw-r--r--Documentation/networking/stmmac.txt14
-rw-r--r--Documentation/networking/switchdev.txt14
-rw-r--r--Documentation/networking/timestamping.txt7
-rw-r--r--Documentation/networking/vxlan.txt52
-rw-r--r--MAINTAINERS18
-rw-r--r--arch/arm/net/bpf_jit_32.c41
-rw-r--r--arch/arm/net/bpf_jit_32.h3
-rw-r--r--arch/s390/net/bpf_jit.h5
-rw-r--r--arch/s390/net/bpf_jit_comp.c93
-rw-r--r--arch/sparc/net/bpf_jit_comp.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c88
-rw-r--r--drivers/bcma/main.c12
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/bt3c_cs.c2
-rw-r--r--drivers/bluetooth/btbcm.c8
-rw-r--r--drivers/bluetooth/btintel.c80
-rw-r--r--drivers/bluetooth/btintel.h19
-rw-r--r--drivers/bluetooth/btmrvl_drv.h6
-rw-r--r--drivers/bluetooth/btusb.c94
-rw-r--r--drivers/bluetooth/dtl1_cs.c6
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_intel.c561
-rw-r--r--drivers/bluetooth/hci_ldisc.c8
-rw-r--r--drivers/bluetooth/hci_uart.h5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/isdn/mISDN/dsp_audio.c22
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/bonding/bond_netlink.c17
-rw-r--r--drivers/net/bonding/bond_options.c7
-rw-r--r--drivers/net/bonding/bond_sysfs.c20
-rw-r--r--drivers/net/dsa/Kconfig6
-rw-r--r--drivers/net/dsa/bcm_sf2.c18
-rw-r--r--drivers/net/dsa/mv88e6171.c6
-rw-r--r--drivers/net/dsa/mv88e6352.c109
-rw-r--r--drivers/net/dsa/mv88e6xxx.c408
-rw-r--r--drivers/net/dsa/mv88e6xxx.h42
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c8
-rw-r--r--drivers/net/ethernet/altera/altera_sgdmahw.h1
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c10
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h57
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c98
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h69
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h204
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c254
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c473
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h79
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c325
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h77
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c358
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c212
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h37
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c36
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h21
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c79
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h5
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c111
-rw-r--r--drivers/net/ethernet/cadence/macb.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c134
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c476
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c83
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c94
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h141
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c113
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c135
-rw-r--r--drivers/net/ethernet/ec_bhf.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h7
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c71
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c69
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c41
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c502
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h77
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_mdio.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c145
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h18
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c65
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h71
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h72
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c407
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c97
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c695
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c135
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h1931
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c259
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h76
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c74
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h67
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c378
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h62
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c199
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h58
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h72
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h16
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h61
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c44
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c310
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c51
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c98
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c114
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c9
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h147
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c371
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c947
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h1090
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c1296
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h207
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h127
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h405
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c1808
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h227
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1349
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1568
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h80
-rw-r--r--drivers/net/ethernet/neterion/s2io.c26
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h19
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c7
-rw-r--r--drivers/net/ethernet/renesas/ravb.h5
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c116
-rw-r--r--drivers/net/ethernet/rocker/rocker.c162
-rw-r--r--drivers/net/ethernet/rocker/rocker.h2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c519
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c28
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h3
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3429
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/selftest.c14
-rw-r--r--drivers/net/ethernet/sfc/siena.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c59
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c78
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c83
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c95
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c138
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h9
-rw-r--r--drivers/net/ethernet/synopsys/Kconfig27
-rw-r--r--drivers/net/ethernet/synopsys/Makefile5
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c3019
-rw-r--r--drivers/net/ethernet/ti/cpsw.c83
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c16
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c401
-rw-r--r--drivers/net/hyperv/hyperv_net.h33
-rw-r--r--drivers/net/hyperv/netvsc.c43
-rw-r--r--drivers/net/hyperv/netvsc_drv.c161
-rw-r--r--drivers/net/hyperv/rndis_filter.c37
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/cc2520.c1
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/ifb.c207
-rw-r--r--drivers/net/macvlan.c1
-rw-r--r--drivers/net/phy/Kconfig20
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/aquantia.c152
-rw-r--r--drivers/net/phy/dp83640.c10
-rw-r--r--drivers/net/phy/dp83867.c6
-rw-r--r--drivers/net/phy/fixed_phy.c8
-rw-r--r--drivers/net/phy/marvell.c53
-rw-r--r--drivers/net/phy/mdio-octeon.c136
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/realtek.c14
-rw-r--r--drivers/net/phy/spi_ks8995.c22
-rw-r--r--drivers/net/phy/teranetics.c135
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/lan78xx.c3495
-rw-r--r--drivers/net/usb/lan78xx.h1069
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c23
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vxlan.c686
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c111
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h26
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c45
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h47
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c76
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c86
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h124
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c159
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c193
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c208
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h72
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c142
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1104
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h949
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c11
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c217
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/b43/lo.c4
-rw-r--r--drivers/net/wireless/b43/lo.h2
-rw-r--r--drivers/net/wireless/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c8
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c130
-rw-r--r--drivers/net/wireless/mwifiex/decl.h3
-rw-r--r--drivers/net/wireless/mwifiex/fw.h93
-rw-r--r--drivers/net/wireless/mwifiex/ie.c3
-rw-r--r--drivers/net/wireless/mwifiex/init.c3
-rw-r--r--drivers/net/wireless/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.c63
-rw-r--r--drivers/net/wireless/mwifiex/main.h38
-rw-r--r--drivers/net/wireless/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c90
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c5
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c207
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c78
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c22
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c7
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c13
-rw-r--r--drivers/net/wireless/mwifiex/usb.c6
-rw-r--r--drivers/net/wireless/mwifiex/util.c59
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c156
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/reg.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c3
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/of/of_mdio.c19
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c66
-rw-r--r--include/linux/bpf.h12
-rw-r--r--include/linux/filter.h17
-rw-r--r--include/linux/ipv6.h4
-rw-r--r--include/linux/mlx4/cq.h3
-rw-r--r--include/linux/mlx4/device.h5
-rw-r--r--include/linux/mlx4/qp.h3
-rw-r--r--include/linux/mlx5/device.h10
-rw-r--r--include/linux/mlx5/driver.h13
-rw-r--r--include/linux/mlx5/mlx5_ifc.h24
-rw-r--r--include/linux/mpls_iptunnel.h6
-rw-r--r--include/linux/net.h8
-rw-r--r--include/linux/netdevice.h27
-rw-r--r--include/linux/netfilter.h42
-rw-r--r--include/linux/netfilter/x_tables.h8
-rw-r--r--include/linux/netfilter_bridge.h12
-rw-r--r--include/linux/perf_event.h10
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/skbuff.h56
-rw-r--r--include/linux/stmmac.h22
-rw-r--r--include/net/act_api.h15
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/bond_options.h1
-rw-r--r--include/net/cfg802154.h2
-rw-r--r--include/net/cls_cgroup.h29
-rw-r--r--include/net/dsa.h17
-rw-r--r--include/net/dst.h6
-rw-r--r--include/net/dst_metadata.h56
-rw-r--r--include/net/fib_rules.h1
-rw-r--r--include/net/flow.h8
-rw-r--r--include/net/gre.h92
-rw-r--r--include/net/inet_hashtables.h4
-rw-r--r--include/net/inet_timewait_sock.h8
-rw-r--r--include/net/ip.h16
-rw-r--r--include/net/ip6_fib.h3
-rw-r--r--include/net/ip_fib.h5
-rw-r--r--include/net/ip_tunnels.h119
-rw-r--r--include/net/ipv6.h76
-rw-r--r--include/net/lwtunnel.h147
-rw-r--r--include/net/mac802154.h17
-rw-r--r--include/net/mpls_iptunnel.h29
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/netfilter.h1
-rw-r--r--include/net/route.h1
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/sch_generic.h31
-rw-r--r--include/net/sock.h18
-rw-r--r--include/net/switchdev.h10
-rw-r--r--include/net/tc_act/tc_gact.h7
-rw-r--r--include/net/tc_act/tc_mirred.h2
-rw-r--r--include/net/tcp.h7
-rw-r--r--include/net/timewait_sock.h3
-rw-r--r--include/net/vxlan.h83
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/bpf.h29
-rw-r--r--include/uapi/linux/ethtool.h5
-rw-r--r--include/uapi/linux/fib_rules.h2
-rw-r--r--include/uapi/linux/if_bridge.h1
-rw-r--r--include/uapi/linux/if_link.h4
-rw-r--r--include/uapi/linux/if_tunnel.h1
-rw-r--r--include/uapi/linux/ipv6.h2
-rw-r--r--include/uapi/linux/lwtunnel.h16
-rw-r--r--include/uapi/linux/mpls.h2
-rw-r--r--include/uapi/linux/mpls_iptunnel.h28
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cttimeout.h2
-rw-r--r--include/uapi/linux/openvswitch.h2
-rw-r--r--include/uapi/linux/rtnetlink.h17
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--kernel/bpf/arraymap.c137
-rw-r--r--kernel/bpf/core.c9
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/verifier.c54
-rw-r--r--kernel/events/core.c78
-rw-r--r--kernel/trace/bpf_trace.c31
-rw-r--r--lib/test_bpf.c817
-rw-r--r--lib/test_rhashtable.c7
-rw-r--r--net/6lowpan/iphc.c2
-rw-r--r--net/Kconfig7
-rw-r--r--net/atm/br2684.c9
-rw-r--r--net/bluetooth/6lowpan.c23
-rw-r--r--net/bluetooth/Kconfig5
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/a2mp.c17
-rw-r--r--net/bluetooth/a2mp.h19
-rw-r--r--net/bluetooth/amp.c134
-rw-r--r--net/bluetooth/amp.h14
-rw-r--r--net/bluetooth/cmtp/capi.c8
-rw-r--r--net/bluetooth/hci_core.c11
-rw-r--r--net/bluetooth/hci_event.c157
-rw-r--r--net/bluetooth/l2cap_sock.c41
-rw-r--r--net/bluetooth/mgmt.c23
-rw-r--r--net/bridge/br_device.c1
-rw-r--r--net/bridge/br_if.c1
-rw-r--r--net/bridge/br_mdb.c144
-rw-r--r--net/bridge/br_multicast.c44
-rw-r--r--net/bridge/br_netfilter_hooks.c20
-rw-r--r--net/bridge/br_netfilter_ipv6.c2
-rw-r--r--net/bridge/br_netlink.c16
-rw-r--r--net/bridge/br_private.h16
-rw-r--r--net/bridge/br_vlan.c18
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/dev.c40
-rw-r--r--net/core/dst.c107
-rw-r--r--net/core/fib_rules.c24
-rw-r--r--net/core/filter.c149
-rw-r--r--net/core/flow_dissector.c58
-rw-r--r--net/core/lwtunnel.c243
-rw-r--r--net/core/neighbour.c14
-rw-r--r--net/core/net-sysfs.c25
-rw-r--r--net/core/pktgen.c5
-rw-r--r--net/core/rtnetlink.c42
-rw-r--r--net/core/timestamping.c6
-rw-r--r--net/dsa/dsa.c12
-rw-r--r--net/dsa/dsa_priv.h8
-rw-r--r--net/dsa/slave.c244
-rw-r--r--net/dsa/tag_brcm.c15
-rw-r--r--net/dsa/tag_dsa.c12
-rw-r--r--net/dsa/tag_edsa.c12
-rw-r--r--net/dsa/tag_trailer.c12
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee802154/rdev-ops.h20
-rw-r--r--net/ieee802154/sysfs.c38
-rw-r--r--net/ieee802154/trace.h22
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/arp.c65
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/fib_frontend.c10
-rw-r--r--net/ipv4/fib_semantics.c99
-rw-r--r--net/ipv4/gre_demux.c235
-rw-r--r--net/ipv4/icmp.c1
-rw-r--r--net/ipv4/inet_hashtables.c38
-rw-r--r--net/ipv4/inet_timewait_sock.c55
-rw-r--r--net/ipv4/ip_fragment.c26
-rw-r--r--net/ipv4/ip_gre.c446
-rw-r--r--net/ipv4/ip_input.c3
-rw-r--r--net/ipv4/ip_tunnel.c37
-rw-r--r--net/ipv4/ip_tunnel_core.c121
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c32
-rw-r--r--net/ipv4/netfilter/ip_tables.c68
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c7
-rw-r--r--net/ipv4/ping.c3
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/route.c28
-rw-r--r--net/ipv4/tcp_bic.c2
-rw-r--r--net/ipv4/tcp_cdg.c2
-rw-r--r--net/ipv4/tcp_cong.c6
-rw-r--r--net/ipv4/tcp_cubic.c4
-rw-r--r--net/ipv4/tcp_highspeed.c2
-rw-r--r--net/ipv4/tcp_htcp.c2
-rw-r--r--net/ipv4/tcp_hybla.c2
-rw-r--r--net/ipv4/tcp_illinois.c2
-rw-r--r--net/ipv4/tcp_input.c64
-rw-r--r--net/ipv4/tcp_ipv4.c7
-rw-r--r--net/ipv4/tcp_metrics.c2
-rw-r--r--net/ipv4/tcp_minisocks.c6
-rw-r--r--net/ipv4/tcp_output.c8
-rw-r--r--net/ipv4/tcp_scalable.c2
-rw-r--r--net/ipv4/tcp_timer.c1
-rw-r--r--net/ipv4/tcp_vegas.c6
-rw-r--r--net/ipv4/tcp_veno.c2
-rw-r--r--net/ipv6/Kconfig11
-rw-r--r--net/ipv6/addrconf.c233
-rw-r--r--net/ipv6/addrconf_core.c11
-rw-r--r--net/ipv6/af_inet6.c12
-rw-r--r--net/ipv6/datagram.c10
-rw-r--r--net/ipv6/exthdrs.c2
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/inet6_hashtables.c9
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_gre.c5
-rw-r--r--net/ipv6/ip6_input.c5
-rw-r--r--net/ipv6/ip6_output.c18
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ndisc.c16
-rw-r--r--net/ipv6/netfilter/ip6_tables.c52
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c5
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c7
-rw-r--r--net/ipv6/raw.c3
-rw-r--r--net/ipv6/route.c77
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/ipv6/sysctl_net_ipv6.c15
-rw-r--r--net/ipv6/tcp_ipv6.c7
-rw-r--r--net/mac802154/cfg.c54
-rw-r--r--net/mac802154/ieee802154_i.h11
-rw-r--r--net/mac802154/iface.c14
-rw-r--r--net/mac802154/main.c10
-rw-r--r--net/mac802154/rx.c14
-rw-r--r--net/mac802154/tx.c27
-rw-r--r--net/mac802154/util.c8
-rw-r--r--net/mpls/Kconfig8
-rw-r--r--net/mpls/Makefile1
-rw-r--r--net/mpls/af_mpls.c197
-rw-r--r--net/mpls/internal.h9
-rw-r--r--net/mpls/mpls_iptunnel.c233
-rw-r--r--net/netfilter/core.c225
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c101
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_queue.c12
-rw-r--r--net/netfilter/nf_tables_api.c8
-rw-r--r--net/netfilter/nf_tables_core.c5
-rw-r--r--net/netfilter/nft_meta.c4
-rw-r--r--net/netfilter/x_tables.c29
-rw-r--r--net/netfilter/xt_TEE.c15
-rw-r--r--net/netfilter/xt_TPROXY.c6
-rw-r--r--net/openvswitch/Kconfig2
-rw-r--r--net/openvswitch/Makefile2
-rw-r--r--net/openvswitch/actions.c17
-rw-r--r--net/openvswitch/datapath.c19
-rw-r--r--net/openvswitch/datapath.h5
-rw-r--r--net/openvswitch/dp_notify.c5
-rw-r--r--net/openvswitch/flow.c4
-rw-r--r--net/openvswitch/flow.h79
-rw-r--r--net/openvswitch/flow_netlink.c84
-rw-r--r--net/openvswitch/flow_netlink.h3
-rw-r--r--net/openvswitch/flow_table.c4
-rw-r--r--net/openvswitch/vport-geneve.c17
-rw-r--r--net/openvswitch/vport-gre.c239
-rw-r--r--net/openvswitch/vport-internal_dev.c38
-rw-r--r--net/openvswitch/vport-netdev.c117
-rw-r--r--net/openvswitch/vport-netdev.h16
-rw-r--r--net/openvswitch/vport-vxlan.c224
-rw-r--r--net/openvswitch/vport-vxlan.h11
-rw-r--r--net/openvswitch/vport.c34
-rw-r--r--net/openvswitch/vport.h21
-rw-r--r--net/packet/af_packet.c6
-rw-r--r--net/rds/bind.c3
-rw-r--r--net/rds/connection.c16
-rw-r--r--net/rds/ib.c2
-rw-r--r--net/rds/ib_cm.c5
-rw-r--r--net/rds/iw.c2
-rw-r--r--net/rds/iw_cm.c5
-rw-r--r--net/rds/rds.h23
-rw-r--r--net/rds/send.c3
-rw-r--r--net/rds/tcp.c165
-rw-r--r--net/rds/tcp.h7
-rw-r--r--net/rds/tcp_connect.c9
-rw-r--r--net/rds/tcp_listen.c40
-rw-r--r--net/rds/transport.c4
-rw-r--r--net/sched/act_api.c44
-rw-r--r--net/sched/act_bpf.c53
-rw-r--r--net/sched/act_connmark.c3
-rw-r--r--net/sched/act_csum.c3
-rw-r--r--net/sched/act_gact.c44
-rw-r--r--net/sched/act_ipt.c2
-rw-r--r--net/sched/act_mirred.c58
-rw-r--r--net/sched/act_nat.c3
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_simple.c3
-rw-r--r--net/sched/act_skbedit.c3
-rw-r--r--net/sched/act_vlan.c3
-rw-r--r--net/sched/cls_cgroup.c23
-rw-r--r--net/sched/sch_qfq.c1
-rw-r--r--net/sctp/protocol.c42
-rw-r--r--net/sctp/sm_statefuns.c2
-rw-r--r--net/switchdev/switchdev.c113
-rw-r--r--net/tipc/bcast.c31
-rw-r--r--net/tipc/bcast.h1
-rw-r--r--net/tipc/bearer.c30
-rw-r--r--net/tipc/bearer.h3
-rw-r--r--net/tipc/core.h10
-rw-r--r--net/tipc/discover.c130
-rw-r--r--net/tipc/link.c2030
-rw-r--r--net/tipc/link.h109
-rw-r--r--net/tipc/msg.c86
-rw-r--r--net/tipc/msg.h112
-rw-r--r--net/tipc/name_distr.c6
-rw-r--r--net/tipc/node.c964
-rw-r--r--net/tipc/node.h84
-rw-r--r--net/tipc/socket.c385
-rw-r--r--net/tipc/socket.h2
-rw-r--r--net/tipc/udp_media.c3
-rw-r--r--net/wimax/op-rfkill.c3
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--samples/bpf/Makefile4
-rw-r--r--samples/bpf/bpf_helpers.h27
-rw-r--r--samples/bpf/test_verifier.c59
-rw-r--r--samples/bpf/tracex1_kern.c2
-rw-r--r--samples/bpf/tracex2_kern.c6
-rw-r--r--samples/bpf/tracex3_kern.c4
-rw-r--r--samples/bpf/tracex4_kern.c6
-rw-r--r--samples/bpf/tracex5_kern.c6
-rw-r--r--samples/bpf/tracex6_kern.c27
-rw-r--r--samples/bpf/tracex6_user.c72
-rw-r--r--tools/net/bpf_jit_disasm.c109
646 files changed, 46365 insertions, 10137 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index f0b4cd72411d..9cf9a0ec333c 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -58,6 +58,10 @@ Optionnal property:
Documentation/devicetree/bindings/net/ethernet.txt
for details.
+- mii-bus : Should be a phandle to a valid MDIO bus device node.
+ This mii-bus will be used in preference to the
+ global dsa,mii-bus defined above, for this switch.
+
Optional subnodes:
- fixed-link : Fixed-link subnode describing a link to a non-MDIO
managed entity. See
@@ -107,6 +111,7 @@ Example:
#address-cells = <1>;
#size-cells = <0>;
reg = <17 1>; /* MDIO address 17, switch 1 in tree */
+ mii-bus = <&mii_bus1>;
switch1uplink: port@0 {
reg = <0>;
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 41b3f3f864e8..5d88f37480b6 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers:
flow control thresholds.
- tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
is used for components that can have configurable fifo sizes.
+- managed: string, specifies the PHY management type. Supported values are:
+ "auto", "in-band-status". "auto" is the default, it usess MDIO for
+ management if fixed-link is not specified.
Child nodes of the Ethernet controller are typically the individual PHY devices
connected via the MDIO bus (sometimes the MDIO bus controller is separate).
They are described in the phy.txt file in this same directory.
+For non-MDIO PHY management see fixed-link.txt.
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index d0e6fa38f335..b30ab6b5cbfa 100644
--- a/Documentation/devicetree/bindings/net/keystone-netcp.txt
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -130,7 +130,11 @@ Required properties:
Optional properties:
- efuse-mac: If this is 1, then the MAC address for the interface is
- obtained from the device efuse mac address register
+ obtained from the device efuse mac address register.
+ If this is 2, the two DWORDs occupied by the MAC address
+ are swapped. When this property is set to 2, the netcp
+ driver will swap the two DWORDs back to the proper order
+ when it obtains the MAC address from efuse (see the sketch
+ below).
- local-mac-address: the driver is designed to use the of_get_mac_address api
only if efuse-mac is 0. When efuse-mac is 0, the MAC
address is obtained from local-mac-address. If this
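
A minimal sketch of the DWORD swap described under efuse-mac above; the
helper name is hypothetical and the fragment assumes kernel types from
<linux/types.h>, it is not taken from the netcp driver:

	static void example_swap_efuse_mac_dwords(u32 addr[2])
	{
		u32 tmp = addr[0];	/* the two 32-bit words arrive swapped */

		addr[0] = addr[1];
		addr[1] = tmp;
	}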
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
new file mode 100644
index 000000000000..51f8d2eba8d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -0,0 +1,75 @@
+* Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
+
+
+Required properties:
+- compatible: Should be "snps,dwc-qos-ethernet-4.10"
+- reg: Address and length of the register set for the device
+- clocks: Phandles to the reference clock and the bus clock
+- clock-names: Should be "phy_ref_clk" for the reference clock and "apb_pclk"
+ for the bus clock.
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupts: Should contain the core's combined interrupt signal
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+- mac-address: See ethernet.txt in the same directory
+- local-mac-address: See ethernet.txt in the same directory
+- snps,en-lpi: If present it enables use of the AXI low-power interface
+- snps,write-requests: Number of write requests that the AXI port can issue.
+ It depends on the SoC configuration.
+- snps,read-requests: Number of read requests that the AXI port can issue.
+ It depends on the SoC configuration.
+- snps,burst-map: Bitmap of allowed AXI burst lengths, with the LSB
+ representing 4, then 8 etc.
+- snps,txpbl: DMA Programmable burst length for the TX DMA
+- snps,rxpbl: DMA Programmable burst length for the RX DMA
+- snps,en-tx-lpi-clockgating: Enable gating of the MAC TX clock during
+ TX low-power mode.
+- phy-handle: See ethernet.txt file in the same directory
+- mdio device tree subnode: When the GMAC has a phy connected to its local
+ mdio, there must be a device tree subnode with the following
+ required properties:
+ - compatible: Must be "snps,dwc-qos-ethernet-mdio".
+ - #address-cells: Must be <1>.
+ - #size-cells: Must be <0>.
+
+ For each phy on the mdio bus, there must be a node with the following
+ fields:
+
+ - reg: phy id used to communicate with the phy.
+ - device_type: Must be "ethernet-phy".
+ - fixed-mode device tree subnode: see fixed-link.txt in the same directory
+
+Examples:
+ethernet2@40010000 {
+ clock-names = "phy_ref_clk", "apb_pclk";
+ clocks = <&clkc 17>, <&clkc 15>;
+ compatible = "snps,dwc-qos-ethernet-4.10";
+ interrupt-parent = <&intc>;
+ interrupts = <0x0 0x1e 0x4>;
+ reg = <0x40010000 0x4000>;
+ phy-handle = <&phy2>;
+ phy-mode = "gmii";
+
+ snps,en-tx-lpi-clockgating;
+ snps,en-lpi;
+ snps,write-requests = <2>;
+ snps,read-requests = <16>;
+ snps,burst-map = <0x7>;
+ snps,txpbl = <8>;
+ snps,rxpbl = <2>;
+
+ dma-coherent;
+
+ mdio {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ phy2: phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ device_type = "ethernet-phy";
+ reg = <0x1>;
+ };
+ };
+};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5fae7704daab..56db1efd7189 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1215,14 +1215,20 @@ flowlabel_consistency - BOOLEAN
FALSE: disabled
Default: TRUE
-auto_flowlabels - BOOLEAN
- Automatically generate flow labels based based on a flow hash
- of the packet. This allows intermediate devices, such as routers,
- to idenfify packet flows for mechanisms like Equal Cost Multipath
+auto_flowlabels - INTEGER
+ Automatically generate flow labels based on a flow hash of the
+ packet. This allows intermediate devices, such as routers, to
+ identify packet flows for mechanisms like Equal Cost Multipath
Routing (see RFC 6438).
- TRUE: enabled
- FALSE: disabled
- Default: false
+ 0: automatic flow labels are completely disabled
+ 1: automatic flow labels are enabled by default, they can be
+ disabled on a per socket basis using the IPV6_AUTOFLOWLABEL
+ socket option
+ 2: automatic flow labels are allowed, they may be enabled on a
+ per socket basis using the IPV6_AUTOFLOWLABEL socket option
+ 3: automatic flow labels are enabled and enforced, they cannot
+ be disabled by the socket option
+ Default: 1
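
As an illustration of the per-socket control described above, a minimal
userspace sketch; IPV6_AUTOFLOWLABEL comes from include/uapi/linux/in6.h
and the fallback #define guards against older libc headers:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IPV6_AUTOFLOWLABEL
	#define IPV6_AUTOFLOWLABEL 70	/* include/uapi/linux/in6.h */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET6, SOCK_STREAM, 0);
		int off = 0;	/* opt this socket out (modes 1 and 2 honour this) */

		if (fd < 0 || setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL,
					 &off, sizeof(off)) < 0)
			perror("IPV6_AUTOFLOWLABEL");
		return 0;
	}

In mode 3 the kernel still generates flow labels regardless of the
option, as noted above.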
flowlabel_state_ranges - BOOLEAN
Split the flow label number space into two ranges. 0-0x7FFFF is
@@ -1340,6 +1346,14 @@ accept_ra_from_local - BOOLEAN
disabled if accept_ra_from_local is disabled
on a specific interface.
+accept_ra_min_hop_limit - INTEGER
+ Minimum hop limit accepted in Router Advertisements.
+
+ A hop limit value in a Router Advertisement lower than this
+ variable shall be ignored.
+
+ Default: 1
+
accept_ra_pinfo - BOOLEAN
Learn Prefix Information in Router Advertisement.
@@ -1435,6 +1449,11 @@ mtu - INTEGER
Default Maximum Transfer Unit
Default: 1280 (IPv6 required minimum)
+ip_nonlocal_bind - BOOLEAN
+ If set, allows processes to bind() to non-local IPv6 addresses,
+ which can be quite useful - but may break some applications.
+ Default: 0
+
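
For illustration, a hedged userspace sketch of the behaviour this sysctl
controls; bind_nonlocal_v6 is a hypothetical helper and 2001:db8::1 is a
documentation-prefix address assumed not to be configured locally:

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	int bind_nonlocal_v6(void)
	{
		struct sockaddr_in6 sa = { .sin6_family = AF_INET6 };
		int fd = socket(AF_INET6, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		inet_pton(AF_INET6, "2001:db8::1", &sa.sin6_addr);
		/* fails with EADDRNOTAVAIL unless ip_nonlocal_bind is set */
		return bind(fd, (struct sockaddr *)&sa, sizeof(sa));
	}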
router_probe_interval - INTEGER
Minimum interval (in seconds) between Router Probing described
in RFC4191.
@@ -1455,6 +1474,13 @@ router_solicitations - INTEGER
routers are present.
Default: 3
+use_oif_addrs_only - BOOLEAN
+ When enabled, the candidate source addresses for destinations
+ routed via this interface are restricted to the set of addresses
+ configured on this interface (see RFC 6724, section 4).
+
+ Default: false
+
use_tempaddr - INTEGER
Preference for Privacy Extensions (RFC3041).
<= 0 : disable Privacy Extensions
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index e655e2453c98..2903b1cf4d70 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -135,12 +135,8 @@ struct plat_stmmacenet_data {
int maxmtu;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void *custom_cfg;
- void *custom_data;
void *bsp_priv;
};
@@ -179,15 +175,11 @@ Where:
o bus_setup: perform HW setup of the bus. For example, on some ST platforms
this field is used to configure the AMBA bridge to generate more
efficient STBus traffic.
- o setup/init/exit: callbacks used for calling a custom initialization;
+ o init/exit: callbacks used for calling a custom initialization;
this is sometime necessary on some platforms (e.g. ST boxes)
where the HW needs to have set some PIO lines or system cfg
- registers. setup should return a pointer to private data,
- which will be stored in bsp_priv, and then passed to init and
- exit callbacks. init/exit callbacks should not use or modify
+ registers. init/exit callbacks should not use or modify
platform data.
- o custom_cfg/custom_data: this is a custom configuration that can be passed
- while initializing the resources.
o bsp_priv: another private pointer.
For MDIO bus The we have:
@@ -278,8 +270,6 @@ capability register can replace what has been passed from the platform.
Please see the following document:
Documentation/devicetree/bindings/net/stmmac.txt
-and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
-
4.11) This is a summary of the content of some relevant files:
o stmmac_main.c: to implement the main network device driver;
o stmmac_mdio.c: to provide mdio functions;
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index c5d7ade10ff2..9825f32a8634 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -279,8 +279,18 @@ and unknown unicast packets to all ports in domain, if allowed by port's
current STP state. The switch driver, knowing which ports are within which
vlan L2 domain, can program the switch device for flooding. The packet should
also be sent to the port netdev for processing by the bridge driver. The
-bridge should not reflood the packet to the same ports the device flooded.
-XXX: the mechanism to avoid duplicate flood packets is being discuseed.
+bridge should not reflood the packet to the same ports the device flooded,
+otherwise there will be duplicate packets on the wire.
+
+To avoid duplicate packets, the device/driver should mark a packet as already
+forwarded using skb->offload_fwd_mark. The same mark is set on the device
+ports in the domain using dev->offload_fwd_mark. If the skb->offload_fwd_mark
+is non-zero and matches the forwarding egress port's dev->offload_fwd_mark, the
+kernel will drop the skb right before transmit on the egress port, with the
+understanding that the device already forwarded the packet on that egress port.
+The driver can use switchdev_port_fwd_mark_set() to set a globally unique mark
+for port's dev->offload_fwd_mark, based on the port's parent ID (switch ID) and
+a group ifindex.
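
A hedged sketch of the receive-path half of this scheme; example_port_rx
is a hypothetical driver function, and the two fields are the ones named
above:

	static void example_port_rx(struct net_device *dev,
				    struct sk_buff *skb)
	{
		skb->dev = dev;
		/* tell the bridge this skb was already flooded by the
		 * device within the domain identified by the mark
		 */
		skb->offload_fwd_mark = dev->offload_fwd_mark;
		netif_receive_skb(skb);
	}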
It is possible for the switch device to not handle flooding and push the
packets up to the bridge driver for flooding. This is not ideal as the number
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 5f0922613f1a..a977339fbe0a 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -359,6 +359,13 @@ the requested fine-grained filtering for incoming packets is not
supported, the driver may time stamp more than just the requested types
of packets.
+Drivers are free to use a more permissive configuration than the requested
+configuration. It is expected that drivers should only directly implement the
+most generic mode that can be supported. For example, if the hardware can
+support HWTSTAMP_FILTER_V2_EVENT, then it should generally upscale
+HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+is more generic (and more useful to applications).
+
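
To make the upscaling concrete, a hedged sketch of a driver's filter
selection, written with the enum names from include/uapi/linux/net_tstamp.h
rather than the shorthand used above (example_upscale_rx_filter is
hypothetical):

	static int example_upscale_rx_filter(int requested)
	{
		switch (requested) {
		case HWTSTAMP_FILTER_NONE:
			return HWTSTAMP_FILTER_NONE;
		case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* hardware exposes one generic PTPv2 mode; report
			 * the more permissive filter back to the caller
			 */
			return HWTSTAMP_FILTER_PTP_V2_EVENT;
		default:
			return HWTSTAMP_FILTER_ALL;
		}
	}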
A driver which supports hardware time stamping shall update the struct
with the actual, possibly more permissive configuration. If the
requested packets cannot be time stamped, then nothing should be
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt
index 6d993510f091..c28f4989c3f0 100644
--- a/Documentation/networking/vxlan.txt
+++ b/Documentation/networking/vxlan.txt
@@ -1,32 +1,36 @@
Virtual eXtensible Local Area Networking documentation
======================================================
-The VXLAN protocol is a tunnelling protocol that is designed to
-solve the problem of limited number of available VLAN's (4096).
-With VXLAN identifier is expanded to 24 bits.
-
-It is a draft RFC standard, that is implemented by Cisco Nexus,
-Vmware and Brocade. The protocol runs over UDP using a single
-destination port (still not standardized by IANA).
-This document describes the Linux kernel tunnel device,
-there is also an implantation of VXLAN for Openvswitch.
-
-Unlike most tunnels, a VXLAN is a 1 to N network, not just point
-to point. A VXLAN device can either dynamically learn the IP address
-of the other end, in a manner similar to a learning bridge, or the
-forwarding entries can be configured statically.
-
-The management of vxlan is done in a similar fashion to it's
-too closest neighbors GRE and VLAN. Configuring VXLAN requires
-the version of iproute2 that matches the kernel release
-where VXLAN was first merged upstream.
+The VXLAN protocol is a tunnelling protocol designed to solve the
+problem of limited VLAN IDs (4096) in IEEE 802.1q. With VXLAN the
+size of the identifier is expanded to 24 bits (16777216).
+
+VXLAN is described by IETF RFC 7348, and has been implemented by a
+number of vendors. The protocol runs over UDP using a single
+destination port. This document describes the Linux kernel tunnel
+device; there is also a separate implementation of VXLAN for
+Openvswitch.
+
+Unlike most tunnels, a VXLAN is a 1 to N network, not just point to
+point. A VXLAN device can learn the IP address of the other endpoint
+either dynamically in a manner similar to a learning bridge, or make
+use of statically-configured forwarding entries.
+
+The management of vxlan is done in a manner similar to its two closest
+neighbors GRE and VLAN. Configuring VXLAN requires the version of
+iproute2 that matches the kernel release where VXLAN was first merged
+upstream.
1. Create vxlan device
- # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
-
-This creates a new device (vxlan0). The device uses the
-the multicast group 239.1.1.1 over eth1 to handle packets where
-no entry is in the forwarding table.
+ # ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 dstport 4789
+
+This creates a new device named vxlan0. The device uses the multicast
+group 239.1.1.1 over eth1 to handle traffic for which there is no
+entry in the forwarding table. The destination port number is set to
+the IANA-assigned value of 4789. The Linux implementation of VXLAN
+pre-dates the IANA's selection of a standard destination port number
+and uses the Linux-selected value by default to maintain backwards
+compatibility.
2. Delete vxlan device
# ip link delete vxlan0
diff --git a/MAINTAINERS b/MAINTAINERS
index a9ae6c105520..ca51eba9fe5d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6510,7 +6510,7 @@ F: drivers/net/ethernet/marvell/mvneta.*
MARVELL MWIFIEX WIRELESS DRIVER
M: Amitkumar Karwar <akarwar@marvell.com>
-M: Avinash Patil <patila@marvell.com>
+M: Nishant Sarmukadam <nishants@marvell.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mwifiex/
@@ -6648,6 +6648,15 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx4/en_*
+MELLANOX ETHERNET SWITCH DRIVERS
+M: Jiri Pirko <jiri@mellanox.com>
+M: Ido Schimmel <idosch@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+F: drivers/net/ethernet/mellanox/mlxsw/
+
MEMORY MANAGEMENT
L: linux-mm@kvack.org
W: http://www.linux-mm.org
@@ -8912,6 +8921,13 @@ F: include/linux/dma/dw.h
F: include/linux/platform_data/dma-dw.h
F: drivers/dma/dw/
+SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
+M: Lars Persson <lars.persson@axis.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
+
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Seungwon Jeon <tgih.jun@samsung.com>
M: Jaehoon Chung <jh80.chung@samsung.com>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c011e2296cb1..876060bcceeb 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -857,7 +857,9 @@ b_epilogue:
emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
break;
case BPF_ANC | SKF_AD_IFINDEX:
+ case BPF_ANC | SKF_AD_HATYPE:
/* A = skb->dev->ifindex */
+ /* A = skb->dev->type */
ctx->seen |= SEEN_SKB;
off = offsetof(struct sk_buff, dev);
emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
@@ -867,8 +869,24 @@ b_epilogue:
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
ifindex) != 4);
- off = offsetof(struct net_device, ifindex);
- emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ type) != 2);
+
+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+ off = offsetof(struct net_device, ifindex);
+ emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+ } else {
+ /*
+ * offset of field "type" in "struct
+ * net_device" is above what can be
+ * used in the ldrh rd, [rn, #imm]
+ * instruction, so load the offset in
+ * a register and use ldrh rd, [rn, rm]
+ */
+ off = offsetof(struct net_device, type);
+ emit_mov_i(ARM_R3, off, ctx);
+ emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
+ }
break;
case BPF_ANC | SKF_AD_MARK:
ctx->seen |= SEEN_SKB;
@@ -895,6 +913,17 @@ b_epilogue:
OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
}
break;
+ case BPF_ANC | SKF_AD_PKTTYPE:
+ ctx->seen |= SEEN_SKB;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ __pkt_type_offset[0]) != 1);
+ off = PKT_TYPE_OFFSET();
+ emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
+ emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+ emit(ARM_LSR_I(r_A, r_A, 5), ctx);
+#endif
+ break;
case BPF_ANC | SKF_AD_QUEUE:
ctx->seen |= SEEN_SKB;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -904,6 +933,14 @@ b_epilogue:
off = offsetof(struct sk_buff, queue_mapping);
emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
break;
+ case BPF_ANC | SKF_AD_PAY_OFFSET:
+ ctx->seen |= SEEN_SKB | SEEN_CALL;
+
+ emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+ emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
+ emit_blx_r(ARM_R3, ctx);
+ emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+ break;
case BPF_LDX | BPF_W | BPF_ABS:
/*
* load a 32bit word from struct seccomp_data.
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index b2d7d92859d3..4b17d5ab652a 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -74,6 +74,7 @@
#define ARM_INST_LDRB_I 0x05d00000
#define ARM_INST_LDRB_R 0x07d00000
#define ARM_INST_LDRH_I 0x01d000b0
+#define ARM_INST_LDRH_R 0x019000b0
#define ARM_INST_LDR_I 0x05900000
#define ARM_INST_LDM 0x08900000
@@ -160,6 +161,8 @@
| (rm))
#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
| (((off) & 0xf0) << 4) | ((off) & 0xf))
+#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+ | (rm))
#define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs))
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f6498eec9ee1..f010c93a88b1 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -36,6 +36,8 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* | BPF stack | |
* | | |
* +---------------+ |
+ * | 8 byte skbp | |
+ * R15+170 -> +---------------+ |
* | 8 byte hlen | |
* R15+168 -> +---------------+ |
* | 4 byte align | |
@@ -51,11 +53,12 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* We get 160 bytes stack space from calling function, but only use
* 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
*/
-#define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_SPACE (MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
#define STK_160_UNUSED (160 - 12 * 8)
#define STK_OFF (STK_SPACE - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
+#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */
#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
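
For reference, assuming MAX_BPF_STACK is 512 (its value in
include/linux/filter.h at the time of writing), the enlarged frame works
out to STK_SPACE = 512 + 8 + 8 + 4 + 4 + 160 = 696 bytes, with
STK_160_UNUSED = 160 - 12 * 8 = 64 and therefore STK_OFF = 696 - 64 = 632.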
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 8d2e5165865f..eeda051442c3 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@ struct bpf_jit {
int labels[1]; /* Labels for local jumps */
};
-#define BPF_SIZE_MAX 4096 /* Max size for program */
+#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */
#define SEEN_SKB 1 /* skb access */
#define SEEN_MEM 2 /* use mem[] for temporary storage */
@@ -53,6 +53,7 @@ struct bpf_jit {
#define SEEN_LITERAL 8 /* code uses literals */
#define SEEN_FUNC 16 /* calls C functions */
#define SEEN_TAIL_CALL 32 /* code uses tail calls */
+#define SEEN_SKB_CHANGE 64 /* code changes skb data */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
/*
@@ -203,19 +204,11 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
_EMIT6(op1 | __disp, op2); \
})
-#define EMIT6_DISP(op1, op2, b1, b2, b3, disp) \
-({ \
- _EMIT6_DISP(op1 | reg(b1, b2) << 16 | \
- reg_high(b3) << 8, op2, disp); \
- REG_SET_SEEN(b1); \
- REG_SET_SEEN(b2); \
- REG_SET_SEEN(b3); \
-})
-
#define _EMIT6_DISP_LH(op1, op2, disp) \
({ \
- unsigned int __disp_h = ((u32)disp) & 0xff000; \
- unsigned int __disp_l = ((u32)disp) & 0x00fff; \
+ u32 _disp = (u32) disp; \
+ unsigned int __disp_h = _disp & 0xff000; \
+ unsigned int __disp_l = _disp & 0x00fff; \
_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4); \
})
@@ -390,12 +383,32 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
}
/*
+ * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
+ * we store the SKB header length on the stack and the SKB data
+ * pointer in REG_SKB_DATA.
+ */
+static void emit_load_skb_data_hlen(struct bpf_jit *jit)
+{
+ /* Header length: llgf %w1,<len>(%b1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
+ offsetof(struct sk_buff, len));
+ /* s %w1,<data_len>(%b1) */
+ EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
+ offsetof(struct sk_buff, data_len));
+ /* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
+ /* lg %skb_data,data_off(%b1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+ BPF_REG_1, offsetof(struct sk_buff, data));
+}
+
+/*
* Emit function prologue
*
* Save registers and create stack frame if necessary.
* See stack frame layout desription in "bpf_jit.h"!
*/
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
{
if (jit->seen & SEEN_TAIL_CALL) {
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -429,32 +442,21 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152);
}
- /*
- * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
- * we store the SKB header length on the stack and the SKB data
- * pointer in REG_SKB_DATA.
- */
- if (jit->seen & SEEN_SKB) {
- /* Header length: llgf %w1,<len>(%b1) */
- EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
- offsetof(struct sk_buff, len));
- /* s %w1,<data_len>(%b1) */
- EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
- offsetof(struct sk_buff, data_len));
- /* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+ if (jit->seen & SEEN_SKB)
+ emit_load_skb_data_hlen(jit);
+ if (jit->seen & SEEN_SKB_CHANGE)
+ /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
- STK_OFF_HLEN);
- /* lg %skb_data,data_off(%b1) */
- EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
- BPF_REG_1, offsetof(struct sk_buff, data));
+ STK_OFF_SKBP);
+ /* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+ if (is_classic) {
+ if (REG_SEEN(BPF_REG_A))
+ /* lghi %ba,0 */
+ EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+ if (REG_SEEN(BPF_REG_X))
+ /* lghi %bx,0 */
+ EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
}
- /* BPF compatibility: clear A (%b0) and X (%b7) registers */
- if (REG_SEEN(BPF_REG_A))
- /* lghi %ba,0 */
- EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
- if (REG_SEEN(BPF_REG_X))
- /* lghi %bx,0 */
- EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
}
/*
@@ -976,12 +978,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
/* lg %w1,<d(imm)>(%l) */
- EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
- EMIT_CONST_U64(func));
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+ EMIT_CONST_U64(func));
/* basr %r14,%w1 */
EMIT2(0x0d00, REG_14, REG_W1);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ if (bpf_helper_changes_skb_data((void *)func)) {
+ jit->seen |= SEEN_SKB_CHANGE;
+ /* lg %b1,ST_OFF_SKBP(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
+ REG_15, STK_OFF_SKBP);
+ emit_load_skb_data_hlen(jit);
+ }
break;
}
case BPF_JMP | BPF_CALL | BPF_X:
@@ -1023,7 +1032,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
MAX_TAIL_CALL_CNT, 0, 0x2);
/*
- * prog = array->prog[index];
+ * prog = array->ptrs[index];
* if (prog == NULL)
* goto out;
*/
@@ -1032,7 +1041,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
/* lg %r1,prog(%b2,%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
- REG_1, offsetof(struct bpf_array, prog));
+ REG_1, offsetof(struct bpf_array, ptrs));
/* clgij %r1,0,0x8,label0 */
EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
@@ -1236,7 +1245,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
jit->lit = jit->lit_start;
jit->prg = 0;
- bpf_jit_prologue(jit);
+ bpf_jit_prologue(jit, bpf_prog_was_classic(fp));
for (i = 0; i < fp->len; i += insn_count) {
insn_count = bpf_jit_insn(jit, fp, i);
if (insn_count < 0)
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 7931eeeb649a..f8b9f71b9a2b 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -807,7 +807,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
}
if (bpf_jit_enable > 1)
- bpf_jit_dump(flen, proglen, pass, image);
+ bpf_jit_dump(flen, proglen, pass + 1, image);
if (image) {
bpf_flush_icache(image, image + proglen);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index be2e7a2b10d7..70efcd0940f9 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -246,7 +246,7 @@ static void emit_prologue(u8 **pprog)
* goto out;
* if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
- * prog = array->prog[index];
+ * prog = array->ptrs[index];
* if (prog == NULL)
* goto out;
* goto *(prog->bpf_func + prologue_size);
@@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
- /* prog = array->prog[index]; */
+ /* prog = array->ptrs[index]; */
EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
- offsetof(struct bpf_array, prog));
+ offsetof(struct bpf_array, ptrs));
EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
/* if (prog == NULL)
@@ -315,6 +315,26 @@ static void emit_bpf_tail_call(u8 **pprog)
*pprog = prog;
}
+
+static void emit_load_skb_data_hlen(u8 **pprog)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ /* r9d = skb->len - skb->data_len (headlen)
+ * r10 = skb->data
+ */
+ /* mov %r9d, off32(%rdi) */
+ EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
+
+ /* sub %r9d, off32(%rdi) */
+ EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
+
+ /* mov %r10, off32(%rdi) */
+ EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
+ *pprog = prog;
+}
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
@@ -329,36 +349,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
emit_prologue(&prog);
- if (seen_ld_abs) {
- /* r9d : skb->len - skb->data_len (headlen)
- * r10 : skb->data
- */
- if (is_imm8(offsetof(struct sk_buff, len)))
- /* mov %r9d, off8(%rdi) */
- EMIT4(0x44, 0x8b, 0x4f,
- offsetof(struct sk_buff, len));
- else
- /* mov %r9d, off32(%rdi) */
- EMIT3_off32(0x44, 0x8b, 0x8f,
- offsetof(struct sk_buff, len));
-
- if (is_imm8(offsetof(struct sk_buff, data_len)))
- /* sub %r9d, off8(%rdi) */
- EMIT4(0x44, 0x2b, 0x4f,
- offsetof(struct sk_buff, data_len));
- else
- EMIT3_off32(0x44, 0x2b, 0x8f,
- offsetof(struct sk_buff, data_len));
-
- if (is_imm8(offsetof(struct sk_buff, data)))
- /* mov %r10, off8(%rdi) */
- EMIT4(0x4c, 0x8b, 0x57,
- offsetof(struct sk_buff, data));
- else
- /* mov %r10, off32(%rdi) */
- EMIT3_off32(0x4c, 0x8b, 0x97,
- offsetof(struct sk_buff, data));
- }
+ if (seen_ld_abs)
+ emit_load_skb_data_hlen(&prog);
for (i = 0; i < insn_cnt; i++, insn++) {
const s32 imm32 = insn->imm;
@@ -367,6 +359,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
u8 b1 = 0, b2 = 0, b3 = 0;
s64 jmp_offset;
u8 jmp_cond;
+ bool reload_skb_data;
int ilen;
u8 *func;
@@ -818,12 +811,18 @@ xadd: if (is_imm8(insn->off))
func = (u8 *) __bpf_call_base + imm32;
jmp_offset = func - (image + addrs[i]);
if (seen_ld_abs) {
- EMIT2(0x41, 0x52); /* push %r10 */
- EMIT2(0x41, 0x51); /* push %r9 */
- /* need to adjust jmp offset, since
- * pop %r9, pop %r10 take 4 bytes after call insn
- */
- jmp_offset += 4;
+ reload_skb_data = bpf_helper_changes_skb_data(func);
+ if (reload_skb_data) {
+ EMIT1(0x57); /* push %rdi */
+ jmp_offset += 22; /* pop, mov, sub, mov */
+ } else {
+ EMIT2(0x41, 0x52); /* push %r10 */
+ EMIT2(0x41, 0x51); /* push %r9 */
+ /* need to adjust jmp offset, since
+ * pop %r9, pop %r10 take 4 bytes after call insn
+ */
+ jmp_offset += 4;
+ }
}
if (!imm32 || !is_simm32(jmp_offset)) {
pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -832,8 +831,13 @@ xadd: if (is_imm8(insn->off))
}
EMIT1_off32(0xE8, jmp_offset);
if (seen_ld_abs) {
- EMIT2(0x41, 0x59); /* pop %r9 */
- EMIT2(0x41, 0x5A); /* pop %r10 */
+ if (reload_skb_data) {
+ EMIT1(0x5F); /* pop %rdi */
+ emit_load_skb_data_hlen(&prog);
+ } else {
+ EMIT2(0x41, 0x59); /* pop %r9 */
+ EMIT2(0x41, 0x5A); /* pop %r10 */
+ }
}
break;
@@ -1099,7 +1103,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
}
if (bpf_jit_enable > 1)
- bpf_jit_dump(prog->len, proglen, 0, image);
+ bpf_jit_dump(prog->len, proglen, pass + 1, image);
if (image) {
bpf_flush_icache(header, image + proglen);
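For context, a hedged C model of the scratch state both JITs maintain for
LD_ABS/LD_IND and why it is rebuilt after calls for which
bpf_helper_changes_skb_data() returns true; this mirrors the emitted code above, it
is not the emitted code itself:

    #include <linux/skbuff.h>

    /* What r9d/r10 hold on x86 (and the s390 equivalents): the linear
     * headlen and the start of packet data. A helper that changes skb
     * data may reallocate the packet, so the cache is reloaded after
     * the call using the skb pointer saved across it.
     */
    struct skb_cache {
            u32 hlen;               /* skb->len - skb->data_len */
            unsigned char *data;    /* skb->data */
    };

    static void skb_cache_load(struct skb_cache *c, const struct sk_buff *skb)
    {
            c->hlen = skb->len - skb->data_len;
            c->data = skb->data;
    }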
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9635f1033ce5..8d973c4fc84e 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
@@ -409,6 +410,17 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
+	/* TODO: remove the IS_BUILTIN(CONFIG_BCMA) check when
+ * of_default_bus_match_table is exported or in some other way
+ * accessible. This is just a temporary workaround.
+ */
+ if (IS_BUILTIN(CONFIG_BCMA) && bus->host_pdev) {
+ struct device *dev = &bus->host_pdev->dev;
+
+ of_platform_populate(dev->of_node, of_default_bus_match_table,
+ NULL, dev);
+ }
+
/* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) {
if (bcma_is_core_needed_early(core->id.id))
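The TODO above exists precisely because of_default_bus_match_table is not exported
to modules; of_platform_populate() itself is the standard way to create platform
devices for the children of a DT node. A minimal sketch of a typical call, with a
hypothetical driver name:

    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            /* Register a platform device for each child of this node
             * that matches the default bus table ("simple-bus" etc.).
             */
            return of_platform_populate(pdev->dev.of_node,
                                        of_default_bus_match_table,
                                        NULL, &pdev->dev);
    }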
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2e777071e1dc..79e8234b1aa5 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -132,6 +132,7 @@ config BT_HCIUART_3WIRE
config BT_HCIUART_INTEL
bool "Intel protocol support"
depends on BT_HCIUART
+ select BT_HCIUART_H4
select BT_INTEL
help
The Intel protocol support enables Bluetooth HCI over serial
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index fcfb72e9e0ee..a5c4d0584389 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -492,7 +492,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
- };
+ }
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 7aab65427d38..a00bb82eb7c6 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -427,7 +427,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
- };
+ }
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 9ceb8ac68fdc..02ed816a18f9 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -34,6 +34,7 @@
#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
+#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
@@ -66,9 +67,13 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
*
* The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
	 * that is still waiting for configuration.
+ *
+ * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller
+	 * that is still waiting for configuration.
*/
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
- !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) {
+ !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
BT_INFO("%s: BCM: Using default device address (%pMR)",
hdev->name, &bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -241,6 +246,7 @@ static const struct {
u16 subver;
const char *name;
} bcm_uart_subver_table[] = {
+ { 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 828f2f8d1568..1ce4ac16c7fa 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -89,6 +89,86 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct sk_buff *skb;
+ u8 type = 0x00;
+
+ BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reset after hardware error failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return;
+ }
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len != 13) {
+ BT_ERR("%s: Exception info size mismatch", hdev->name);
+ kfree_skb(skb);
+ return;
+ }
+
+ BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(btintel_hw_error);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+ const char *variant;
+
+ switch (ver->fw_variant) {
+ case 0x06:
+ variant = "Bootloader";
+ break;
+ case 0x23:
+ variant = "Firmware";
+ break;
+ default:
+ return;
+ }
+
+ BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+ variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+ ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+EXPORT_SYMBOL_GPL(btintel_version_info);
+
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+ const void *param)
+{
+ while (plen > 0) {
+ struct sk_buff *skb;
+ u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+ cmd_param[0] = fragment_type;
+ memcpy(cmd_param + 1, param, fragment_len);
+
+ skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+ cmd_param, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ plen -= fragment_len;
+ param += fragment_len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_secure_send);
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
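The fragmentation in btintel_secure_send() follows from the command parameter
budget used here: one byte carries the fragment type, leaving at most 252 payload
bytes per 0xfc09 command. A small worked example (illustrative helper, not part of
the patch):

    /* e.g. a 600-byte payload goes out as 252 + 252 + 96, three commands */
    static unsigned int secure_send_fragments(u32 plen)
    {
            return (plen + 251) / 252;
    }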
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 4bda6ab34f60..b278d14758d5 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -73,6 +73,11 @@ struct intel_secure_send_result {
int btintel_check_bdaddr(struct hci_dev *hdev);
int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+ const void *param);
#else
@@ -86,4 +91,18 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd
return -EOPNOTSUPP;
}
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
+
+static inline void btintel_version_info(struct hci_dev *hdev,
+					struct intel_version *ver)
+{
+}
+
+static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+ u32 plen, const void *param)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 086f0ec89580..27a9aac25583 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -95,10 +95,10 @@ struct btmrvl_private {
struct btmrvl_device btmrvl_dev;
struct btmrvl_adapter *adapter;
struct btmrvl_thread main_thread;
- int (*hw_host_to_card) (struct btmrvl_private *priv,
+ int (*hw_host_to_card)(struct btmrvl_private *priv,
u8 *payload, u16 nb);
- int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
- int (*hw_process_int_status) (struct btmrvl_private *priv);
+ int (*hw_wakeup_firmware)(struct btmrvl_private *priv);
+ int (*hw_process_int_status)(struct btmrvl_private *priv);
void (*firmware_dump)(struct btmrvl_private *priv);
spinlock_t driver_lock; /* spinlock used by driver */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b4cf8d9c9dac..cc92b0f84a51 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -68,6 +68,9 @@ static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth AMP device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
+ /* Generic Bluetooth USB interface */
+ { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) },
+
/* Apple-specific (Broadcom) devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_APPLE },
@@ -1878,51 +1881,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return -EILSEQ;
}
-static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
- u32 plen, const void *param)
-{
- while (plen > 0) {
- struct sk_buff *skb;
- u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
-
- cmd_param[0] = fragment_type;
- memcpy(cmd_param + 1, param, fragment_len);
-
- skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
- cmd_param, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- kfree_skb(skb);
-
- plen -= fragment_len;
- param += fragment_len;
- }
-
- return 0;
-}
-
-static void btusb_intel_version_info(struct hci_dev *hdev,
- struct intel_version *ver)
-{
- const char *variant;
-
- switch (ver->fw_variant) {
- case 0x06:
- variant = "Bootloader";
- break;
- case 0x23:
- variant = "Firmware";
- break;
- default:
- return;
- }
-
- BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
- variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
- ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
-}
-
static int btusb_setup_intel_new(struct hci_dev *hdev)
{
static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
@@ -1984,7 +1942,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- btusb_intel_version_info(hdev, ver);
+ btintel_version_info(hdev, ver);
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x06 identifies
@@ -2104,7 +2062,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Start the firmware download transaction with the Init fragment
* represented by the 128 bytes of CSS header.
*/
- err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data);
if (err < 0) {
BT_ERR("%s: Failed to send firmware header (%d)",
hdev->name, err);
@@ -2114,7 +2072,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Send the 256 bytes of public key information from the firmware
* as the PKey fragment.
*/
- err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+ err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
if (err < 0) {
BT_ERR("%s: Failed to send firmware public key (%d)",
hdev->name, err);
@@ -2124,7 +2082,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Send the 256 bytes of signature information from the firmware
* as the Sign fragment.
*/
- err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+ err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
if (err < 0) {
BT_ERR("%s: Failed to send firmware signature (%d)",
hdev->name, err);
@@ -2148,8 +2106,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
	 * firmware data buffer as a single Data fragment.
*/
if (!(frag_len % 4)) {
- err = btusb_intel_secure_send(hdev, 0x01, frag_len,
- fw_ptr);
+ err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
if (err < 0) {
BT_ERR("%s: Failed to send firmware data (%d)",
hdev->name, err);
@@ -2291,39 +2248,6 @@ done:
return 0;
}
-static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
-{
- struct sk_buff *skb;
- u8 type = 0x00;
-
- BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
-
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: Reset after hardware error failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return;
- }
- kfree_skb(skb);
-
- skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return;
- }
-
- if (skb->len != 13) {
- BT_ERR("%s: Exception info size mismatch", hdev->name);
- kfree_skb(skb);
- return;
- }
-
- BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
-
- kfree_skb(skb);
-}
-
static int btusb_shutdown_intel(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -2783,7 +2707,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_INTEL_NEW) {
hdev->send = btusb_send_frame_intel;
hdev->setup = btusb_setup_intel_new;
- hdev->hw_error = btusb_hw_error_intel;
+ hdev->hw_error = btintel_hw_error;
hdev->set_bdaddr = btintel_set_bdaddr;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 78e10f0c65b2..84135c54ed2e 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -182,9 +182,9 @@ static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb)
int i;
printk(KERN_INFO "Bluetooth: Nokia control data =");
- for (i = 0; i < skb->len; i++) {
+ for (i = 0; i < skb->len; i++)
printk(" %02x", skb->data[i]);
- }
+
printk("\n");
/* transition to active state */
@@ -406,7 +406,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
return -EILSEQ;
- };
+ }
nsh.zero = 0;
nsh.len = skb->len;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 3455cecc9ecf..b35b238a0380 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -75,7 +75,7 @@ struct h5 {
size_t rx_pending; /* Expecting more bytes */
u8 rx_ack; /* Last ack number received */
- int (*rx_func) (struct hci_uart *hu, u8 c);
+ int (*rx_func)(struct hci_uart *hu, u8 c);
struct timer_list timer; /* Retransmission timer */
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 5dd07bf05236..21dfa89751df 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -24,8 +24,569 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/wait.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
+#include "btintel.h"
+
+#define STATE_BOOTLOADER 0
+#define STATE_DOWNLOADING 1
+#define STATE_FIRMWARE_LOADED 2
+#define STATE_FIRMWARE_FAILED 3
+#define STATE_BOOTING 4
+
+struct intel_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+ unsigned long flags;
+};
+
+static int intel_open(struct hci_uart *hu)
+{
+ struct intel_data *intel;
+
+ BT_DBG("hu %p", hu);
+
+ intel = kzalloc(sizeof(*intel), GFP_KERNEL);
+ if (!intel)
+ return -ENOMEM;
+
+ skb_queue_head_init(&intel->txq);
+
+ hu->priv = intel;
+ return 0;
+}
+
+static int intel_close(struct hci_uart *hu)
+{
+ struct intel_data *intel = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&intel->txq);
+ kfree_skb(intel->rx_skb);
+ kfree(intel);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int intel_flush(struct hci_uart *hu)
+{
+ struct intel_data *intel = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&intel->txq);
+
+ return 0;
+}
+
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+ struct sk_buff *skb;
+ struct hci_event_hdr *hdr;
+ struct hci_ev_cmd_complete *evt;
+
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr->evt = HCI_EV_CMD_COMPLETE;
+ hdr->plen = sizeof(*evt) + 1;
+
+ evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+ evt->ncmd = 0x01;
+ evt->opcode = cpu_to_le16(opcode);
+
+ *skb_put(skb, 1) = 0x00;
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int intel_setup(struct hci_uart *hu)
+{
+ static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x08, 0x04, 0x00 };
+ struct intel_data *intel = hu->priv;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+ struct intel_version *ver;
+ struct intel_boot_params *params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ char fwname[64];
+ u32 frag_len;
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+ calltime = ktime_get();
+
+ set_bit(STATE_BOOTLOADER, &intel->flags);
+
+ /* Read the Intel version information to determine if the device
+ * is in bootloader mode or if it already has operational firmware
+ * loaded.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading Intel version information failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s: Intel version event size mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ ver = (struct intel_version *)skb->data;
+ if (ver->status) {
+ BT_ERR("%s: Intel version command failure (%02x)",
+ hdev->name, ver->status);
+ err = -bt_to_errno(ver->status);
+ kfree_skb(skb);
+ return err;
+ }
+
+ /* The hardware platform number has a fixed value of 0x37 and
+ * for now only accept this single value.
+	 * for now only this single value is accepted.
+ if (ver->hw_platform != 0x37) {
+ BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+ hdev->name, ver->hw_platform);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+ * supported by this firmware loading method. This check has been
+ * put in place to ensure correct forward compatibility options
+ * when newer hardware variants come along.
+ */
+ if (ver->hw_variant != 0x0b) {
+ BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+ hdev->name, ver->hw_variant);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ btintel_version_info(hdev, ver);
+
+ /* The firmware variant determines if the device is in bootloader
+ * mode or is running operational firmware. The value 0x06 identifies
+ * the bootloader and the value 0x23 identifies the operational
+ * firmware.
+ *
+ * When the operational firmware is already present, then only
+ * the check for valid Bluetooth device address is needed. This
+	 * determines if the device will be added as a configured or
+	 * an unconfigured controller.
+ *
+ * It is not possible to use the Secure Boot Parameters in this
+ * case since that command is only available in bootloader mode.
+ */
+ if (ver->fw_variant == 0x23) {
+ kfree_skb(skb);
+ clear_bit(STATE_BOOTLOADER, &intel->flags);
+ btintel_check_bdaddr(hdev);
+ return 0;
+ }
+
+ /* If the device is not in bootloader mode, then the only possible
+ * choice is to return an error and abort the device initialization.
+ */
+ if (ver->fw_variant != 0x06) {
+ BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+ hdev->name, ver->fw_variant);
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ kfree_skb(skb);
+
+ /* Read the secure boot parameters to identify the operating
+ * details of the bootloader.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*params)) {
+ BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ params = (struct intel_boot_params *)skb->data;
+ if (params->status) {
+ BT_ERR("%s: Intel boot parameters command failure (%02x)",
+ hdev->name, params->status);
+ err = -bt_to_errno(params->status);
+ kfree_skb(skb);
+ return err;
+ }
+
+ BT_INFO("%s: Device revision is %u", hdev->name,
+ le16_to_cpu(params->dev_revid));
+
+ BT_INFO("%s: Secure boot is %s", hdev->name,
+ params->secure_boot ? "enabled" : "disabled");
+
+ BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+ params->min_fw_build_nn, params->min_fw_build_cw,
+ 2000 + params->min_fw_build_yy);
+
+ /* It is required that every single firmware fragment is acknowledged
+ * with a command complete event. If the boot parameters indicate
+ * that this bootloader does not send them, then abort the setup.
+ */
+ if (params->limited_cce != 0x00) {
+ BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+ hdev->name, params->limited_cce);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* If the OTP has no valid Bluetooth device address, then there will
+ * also be no valid address for the operational firmware.
+ */
+ if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+ BT_INFO("%s: No device address configured", hdev->name);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
+
+ /* With this Intel bootloader only the hardware variant and device
+ * revision information are used to select the right firmware.
+ *
+ * Currently this bootloader support is limited to hardware variant
+ * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+	 * iBT 3.0 (LnP/SfP), which is identified by the value 11 (0x0b).
+ snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+ le16_to_cpu(params->dev_revid));
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ BT_ERR("%s: Failed to load Intel firmware file (%d)",
+ hdev->name, err);
+ kfree_skb(skb);
+ return err;
+ }
+
+ BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+ kfree_skb(skb);
+
+ if (fw->size < 644) {
+ BT_ERR("%s: Invalid size of firmware file (%zu)",
+ hdev->name, fw->size);
+ err = -EBADF;
+ goto done;
+ }
+
+ set_bit(STATE_DOWNLOADING, &intel->flags);
+
+ /* Start the firmware download transaction with the Init fragment
+ * represented by the 128 bytes of CSS header.
+ */
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware header (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of public key information from the firmware
+ * as the PKey fragment.
+ */
+ err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware public key (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of signature information from the firmware
+ * as the Sign fragment.
+ */
+ err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware signature (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ fw_ptr = fw->data + 644;
+ frag_len = 0;
+
+ while (fw_ptr - fw->data < fw->size) {
+ struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+ frag_len += sizeof(*cmd) + cmd->plen;
+
+ BT_DBG("%s: patching %td/%zu", hdev->name,
+ (fw_ptr - fw->data), fw->size);
+
+ /* The parameter length of the secure send command requires
+		 * a 4 byte alignment. Conveniently, the firmware file
+		 * contains Intel_NOP commands that pad the fragments to
+		 * this alignment.
+		 *
+		 * Send each 4-byte-aligned set of commands from the
+		 * firmware data buffer as a single Data fragment.
+ */
+ if (frag_len % 4)
+ continue;
+
+ /* Send each command from the firmware data buffer as
+ * a single Data fragment.
+ */
+ err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware data (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ fw_ptr += frag_len;
+ frag_len = 0;
+ }
+
+ set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
+
+ BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+ /* Before switching the device into operational mode and with that
+ * booting the loaded firmware, wait for the bootloader notification
+ * that all fragments have been successfully received.
+ *
+ * When the event processing receives the notification, then the
+ * STATE_DOWNLOADING flag will be cleared.
+ *
+	 * Firmware loading should not take longer than 5 seconds;
+	 * if it does, time out and fail the setup of this device.
+ */
+ err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(5000));
+ if (err == 1) {
+ BT_ERR("%s: Firmware loading interrupted", hdev->name);
+ err = -EINTR;
+ goto done;
+ }
+
+ if (err) {
+ BT_ERR("%s: Firmware loading timeout", hdev->name);
+ err = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
+ BT_ERR("%s: Firmware loading failed", hdev->name);
+ err = -ENOEXEC;
+ goto done;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+ release_firmware(fw);
+
+ if (err < 0)
+ return err;
+
+ calltime = ktime_get();
+
+ set_bit(STATE_BOOTING, &intel->flags);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ /* The bootloader will not indicate when the device is ready. This
+	 * is done by the operational firmware sending a bootup notification.
+ *
+ * Booting into operational firmware should not take longer than
+	 * 1 second. If it does, fail the setup since something
+	 * went wrong.
+ */
+ BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+ err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(1000));
+
+ if (err == 1) {
+ BT_ERR("%s: Device boot interrupted", hdev->name);
+ return -EINTR;
+ }
+
+ if (err) {
+ BT_ERR("%s: Device boot timeout", hdev->name);
+ return -ETIMEDOUT;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+ clear_bit(STATE_BOOTLOADER, &intel->flags);
+
+ return 0;
+}
+
+static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct intel_data *intel = hu->priv;
+ struct hci_event_hdr *hdr;
+
+ if (!test_bit(STATE_BOOTLOADER, &intel->flags))
+ goto recv;
+
+ hdr = (void *)skb->data;
+
+ /* When the firmware loading completes the device sends
+ * out a vendor specific event indicating the result of
+ * the firmware loading.
+ */
+ if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+ skb->data[2] == 0x06) {
+ if (skb->data[3] != 0x00)
+ set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
+
+ if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
+ test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&intel->flags, STATE_DOWNLOADING);
+ }
+
+ /* When switching to the operational firmware the device
+ * sends a vendor specific event indicating that the bootup
+ * completed.
+ */
+ } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+ skb->data[2] == 0x02) {
+ if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&intel->flags, STATE_BOOTING);
+ }
+ }
+recv:
+ return hci_recv_frame(hdev, skb);
+}
+
+static const struct h4_recv_pkt intel_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = intel_recv_event },
+};
+
+static int intel_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct intel_data *intel = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel_recv_pkts,
+ ARRAY_SIZE(intel_recv_pkts));
+ if (IS_ERR(intel->rx_skb)) {
+		int err = PTR_ERR(intel->rx_skb);
+
+		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ intel->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct intel_data *intel = hu->priv;
+
+ BT_DBG("hu %p skb %p", hu, skb);
+
+ skb_queue_tail(&intel->txq, skb);
+
+ return 0;
+}
+
+static struct sk_buff *intel_dequeue(struct hci_uart *hu)
+{
+ struct intel_data *intel = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&intel->txq);
+ if (!skb)
+ return skb;
+
+ if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
+ (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) {
+ struct hci_command_hdr *cmd = (void *)skb->data;
+ __u16 opcode = le16_to_cpu(cmd->opcode);
+
+ /* When the 0xfc01 command is issued to boot into
+ * the operational firmware, it will actually not
+ * send a command complete event. To keep the flow
+		 * control working, inject that event here.
+ */
+ if (opcode == 0xfc01)
+ inject_cmd_complete(hu->hdev, opcode);
+ }
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+ return skb;
+}
+
+static const struct hci_uart_proto intel_proto = {
+ .id = HCI_UART_INTEL,
+ .name = "Intel",
+ .init_speed = 115200,
+ .open = intel_open,
+ .close = intel_close,
+ .flush = intel_flush,
+ .setup = intel_setup,
+ .recv = intel_recv,
+ .enqueue = intel_enqueue,
+ .dequeue = intel_dequeue,
+};
+
+int __init intel_init(void)
+{
+ return hci_uart_register_proto(&intel_proto);
+}
+
+int __exit intel_deinit(void)
+{
+ return hci_uart_unregister_proto(&intel_proto);
+}
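intel_recv_event() above matches vendor events by raw length and payload bytes; a
hedged sketch of the two frames it recognizes (field names are inferred from the
checks, not taken from an Intel specification):

    /* skb->len == 7, data[2] == 0x06: firmware-load result */
    struct intel_fw_result_evt {
            __u8 evt;       /* 0xff, vendor-specific event */
            __u8 plen;      /* 0x05 */
            __u8 subevt;    /* 0x06 */
            __u8 result;    /* data[3], 0x00 on success */
            __u8 rsvd[3];
    };

    /* skb->len == 9, data[2] == 0x02: bootup notification */
    struct intel_bootup_evt {
            __u8 evt;       /* 0xff, vendor-specific event */
            __u8 plen;      /* 0x07 */
            __u8 subevt;    /* 0x02 */
            __u8 rsvd[6];
    };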
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 177dd69fdd95..20c2ac193ff9 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -770,7 +770,7 @@ static int __init hci_uart_init(void)
/* Register the tty discipline */
- memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc));
+ memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
hci_uart_ldisc.magic = TTY_LDISC_MAGIC;
hci_uart_ldisc.name = "n_hci";
hci_uart_ldisc.open = hci_uart_tty_open;
@@ -804,6 +804,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_3WIRE
h5_init();
#endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+ intel_init();
+#endif
#ifdef CONFIG_BT_HCIUART_BCM
bcm_init();
#endif
@@ -830,6 +833,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_3WIRE
h5_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+ intel_deinit();
+#endif
#ifdef CONFIG_BT_HCIUART_BCM
bcm_deinit();
#endif
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index ce9c670956f5..496587a73a9d 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -167,6 +167,11 @@ int h5_init(void);
int h5_deinit(void);
#endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+int intel_init(void);
+int intel_deinit(void);
+#endif
+
#ifdef CONFIG_BT_HCIUART_BCM
int bcm_init(void);
int bcm_deinit(void);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 36eb3d012b6d..180a8f7ec82d 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -871,7 +871,7 @@ repoll:
if (is_eth) {
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
if (be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK) {
+ MLX4_CQE_CVLAN_PRESENT_MASK) {
wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
MLX4_CQE_VID_MASK;
} else {
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
index 06022952a437..bbef98e7a16e 100644
--- a/drivers/isdn/mISDN/dsp_audio.c
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -13,6 +13,7 @@
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include <linux/export.h>
+#include <linux/bitrev.h>
#include "core.h"
#include "dsp.h"
@@ -137,27 +138,14 @@ static unsigned char linear2ulaw(short sample)
return ulawbyte;
}
-static int reverse_bits(int i)
-{
- int z, j;
- z = 0;
-
- for (j = 0; j < 8; j++) {
- if ((i & (1 << j)) != 0)
- z |= 1 << (7 - j);
- }
- return z;
-}
-
-
void dsp_audio_generate_law_tables(void)
{
int i;
for (i = 0; i < 256; i++)
- dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i));
+ dsp_audio_alaw_to_s32[i] = alaw2linear(bitrev8((u8)i));
for (i = 0; i < 256; i++)
- dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i));
+ dsp_audio_ulaw_to_s32[i] = ulaw2linear(bitrev8((u8)i));
for (i = 0; i < 256; i++) {
dsp_audio_alaw_to_ulaw[i] =
@@ -176,13 +164,13 @@ dsp_audio_generate_s2law_table(void)
/* generating ulaw-table */
for (i = -32768; i < 32768; i++) {
dsp_audio_s16_to_law[i & 0xffff] =
- reverse_bits(linear2ulaw(i));
+ bitrev8(linear2ulaw(i));
}
} else {
/* generating alaw-table */
for (i = -32768; i < 32768; i++) {
dsp_audio_s16_to_law[i & 0xffff] =
- reverse_bits(linear2alaw(i));
+ bitrev8(linear2alaw(i));
}
}
}
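The bitrev8() conversion is behavior-preserving: a quick standalone check (sketch,
not kernel code) that the library helper matches the removed open-coded loop for
every 8-bit value:

    #include <linux/bitrev.h>
    #include <linux/errno.h>

    static int check_bitrev8_matches_old_loop(void)
    {
            int i, j;

            for (i = 0; i < 256; i++) {
                    int z = 0;      /* the removed reverse_bits(i) */

                    for (j = 0; j < 8; j++)
                            if (i & (1 << j))
                                    z |= 1 << (7 - j);
                    if (z != bitrev8((u8)i))
                            return -EINVAL; /* never taken */
            }
            return 0;
    }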
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 7fde4d5c2b28..3c45358844eb 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1870,8 +1870,6 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
static void ad_marker_response_received(struct bond_marker *marker,
struct port *port)
{
- marker = NULL;
- port = NULL;
/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a98dd4f1b0e3..2d7d72c88519 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3780,7 +3780,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
struct slave *slave;
struct list_head *iter;
struct bond_up_slave *new_arr, *old_arr;
- int slaves_in_agg;
int agg_id = 0;
int ret = 0;
@@ -3811,7 +3810,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
}
goto out;
}
- slaves_in_agg = ad_info.ports;
agg_id = ad_info.aggregator_id;
}
bond_for_each_slave(bond, slave, iter) {
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 1bda29249d12..db760e84119f 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -111,6 +111,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_USER_PORT_KEY] = { .type = NLA_U16 },
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
+ [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -405,7 +406,6 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
-
if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
int port_key =
nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
@@ -415,7 +415,6 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
-
if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
return -EINVAL;
@@ -426,6 +425,15 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
+ if (data[IFLA_BOND_TLB_DYNAMIC_LB]) {
+ int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
+
+ bond_opt_initval(&newval, dynamic_lb);
+ err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -476,6 +484,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
0;
}
@@ -598,6 +607,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.ad_select))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
+ bond->params.tlb_dynamic_lb))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
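With IFLA_BOND_TLB_DYNAMIC_LB plumbed through the policy, changelink and fill_info
paths, the option becomes settable and readable over rtnetlink instead of sysfs
only. Assuming an iproute2 build new enough to know the keyword, usage would look
roughly like:

    ip link add bond0 type bond mode balance-tlb tlb_dynamic_lb 0
    ip -d link show bond0    # the option now appears in the details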
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e9c624d54dd4..6dda57e2e724 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -420,6 +420,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_user_port_key_tbl,
.set = bond_option_ad_user_port_key_set,
+ },
+ [BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
+ .id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+ .name = "num_grat_arp",
+ .desc = "Number of peer notifications to send on failover event",
+ .values = bond_num_peer_notif_tbl,
+ .set = bond_option_num_peer_notif_set
}
};
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 31835a4dab57..f4ae72086215 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -380,7 +380,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
bonding_show_ad_select, bonding_sysfs_store_option);
-/* Show and set the number of peer notifications to send after a failover event. */
+/* Show the number of peer notifications to send after a failover event. */
static ssize_t bonding_show_num_peer_notif(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -388,24 +388,10 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
struct bonding *bond = to_bond(d);
return sprintf(buf, "%d\n", bond->params.num_peer_notif);
}
-
-static ssize_t bonding_store_num_peer_notif(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
- bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+ bonding_show_num_peer_notif, bonding_sysfs_store_option);
static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
- bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+ bonding_show_num_peer_notif, bonding_sysfs_store_option);
/* Show the MII monitor interval. */
static ssize_t bonding_show_miimon(struct device *d,
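num_grat_arp can drop its dedicated store handler because the generic one resolves
options by sysfs attribute name, which is what the BOND_OPT_NUM_PEER_NOTIF_ALIAS
entry added above provides. A hedged sketch of that shared path, with
bond_opt_get_by_name() and bond_opt_tryset_rtnl() assumed from bond_options:

    /* Sketch of the generic store path both attributes now share. */
    static ssize_t store_by_attr_name(struct bonding *bond,
                                      const char *attr_name,
                                      const char *buf, size_t count)
    {
            const struct bond_option *opt = bond_opt_get_by_name(attr_name);
            int ret;

            if (!opt)
                    return -ENOENT;
            ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buf);
            return ret ? ret : count;
    }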
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7ad0a4d8e475..4c483d937481 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -46,13 +46,13 @@ config NET_DSA_MV88E6171
ethernet switches chips.
config NET_DSA_MV88E6352
- tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
+ tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6172, 88E6176 and 88E6352
- ethernet switch chips.
+ This enables support for the Marvell 88E6172, 88E6176, 88E6320,
+ 88E6321 and 88E6352 ethernet switch chips.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 079897b3a955..289e20443d83 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -901,15 +901,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- u32 duplex, pause, speed;
+ u32 duplex, pause;
u32 reg;
duplex = core_readl(priv, CORE_DUPSTS);
pause = core_readl(priv, CORE_PAUSESTS);
- speed = core_readl(priv, CORE_SPDSTS);
-
- speed >>= (port * SPDSTS_SHIFT);
- speed &= SPDSTS_MASK;
status->link = 0;
@@ -944,18 +940,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
reg &= ~LINK_STS;
core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
- switch (speed) {
- case SPDSTS_10:
- status->speed = SPEED_10;
- break;
- case SPDSTS_100:
- status->speed = SPEED_100;
- break;
- case SPDSTS_1000:
- status->speed = SPEED_1000;
- break;
- }
-
if ((pause & (1 << port)) &&
(pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
status->asym_pause = 1;
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 1c7808495a9d..735f04cd83ee 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -116,9 +116,9 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.port_join_bridge = mv88e6xxx_join_bridge,
.port_leave_bridge = mv88e6xxx_leave_bridge,
.port_stp_update = mv88e6xxx_port_stp_update,
- .fdb_add = mv88e6xxx_port_fdb_add,
- .fdb_del = mv88e6xxx_port_fdb_del,
- .fdb_getnext = mv88e6xxx_port_fdb_getnext,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_getnext = mv88e6xxx_port_fdb_getnext,
};
MODULE_ALIAS("platform:mv88e6171");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 632815c10a40..a18f7c83d4cb 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -36,6 +36,18 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
return "Marvell 88E6172";
if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
return "Marvell 88E6176";
+ if (ret == PORT_SWITCH_ID_6320_A1)
+ return "Marvell 88E6320 (A1)";
+ if (ret == PORT_SWITCH_ID_6320_A2)
+		return "Marvell 88E6320 (A2)";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6320)
+ return "Marvell 88E6320";
+ if (ret == PORT_SWITCH_ID_6321_A1)
+ return "Marvell 88E6321 (A1)";
+ if (ret == PORT_SWITCH_ID_6321_A2)
+		return "Marvell 88E6321 (A2)";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6321)
+ return "Marvell 88E6321";
if (ret == PORT_SWITCH_ID_6352_A0)
return "Marvell 88E6352 (A0)";
if (ret == PORT_SWITCH_ID_6352_A1)
@@ -80,66 +92,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
return 0;
}
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
-{
- int ret;
-
- *temp = 0;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
- if (ret < 0)
- return ret;
-
- *temp = (ret & 0xff) - 25;
-
- return 0;
-}
-
-static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
-{
- int ret;
-
- *temp = 0;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
- if (ret < 0)
- return ret;
-
- *temp = (((ret >> 8) & 0x1f) * 5) - 25;
-
- return 0;
-}
-
-static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
-{
- int ret;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
- if (ret < 0)
- return ret;
- temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
- (ret & 0xe0ff) | (temp << 8));
-}
-
-static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
-{
- int ret;
-
- *alarm = false;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
- if (ret < 0)
- return ret;
-
- *alarm = !!(ret & 0x40);
-
- return 0;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
static int mv88e6352_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -171,8 +123,9 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
mutex_lock(&ps->eeprom_mutex);
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
- 0xc000 | (addr & 0xff));
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_READ |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
if (ret < 0)
goto error;
@@ -180,7 +133,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
if (ret < 0)
goto error;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
error:
mutex_unlock(&ps->eeprom_mutex);
return ret;
@@ -253,11 +206,11 @@ static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
{
int ret;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
if (ret < 0)
return ret;
- if (!(ret & 0x0400))
+ if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
return -EROFS;
return 0;
@@ -271,12 +224,13 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
mutex_lock(&ps->eeprom_mutex);
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
if (ret < 0)
goto error;
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
- 0xb000 | (addr & 0xff));
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_WRITE |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
if (ret < 0)
goto error;
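The named constants substitute one-for-one for the old magic numbers; from the two
hunks above, the EEPROM op words decompose as (illustration only, values per the
diff):

    u16 read_op  = GLOBAL2_EEPROM_OP_READ |
                   (addr & GLOBAL2_EEPROM_OP_ADDR_MASK); /* == 0xc000 | (addr & 0xff) */
    u16 write_op = GLOBAL2_EEPROM_OP_WRITE |
                   (addr & GLOBAL2_EEPROM_OP_ADDR_MASK); /* == 0xb000 | (addr & 0xff) */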
@@ -377,10 +331,10 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
.set_eee = mv88e6xxx_set_eee,
.get_eee = mv88e6xxx_get_eee,
#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6352_get_temp,
- .get_temp_limit = mv88e6352_get_temp_limit,
- .set_temp_limit = mv88e6352_set_temp_limit,
- .get_temp_alarm = mv88e6352_get_temp_alarm,
+ .get_temp = mv88e6xxx_get_temp,
+ .get_temp_limit = mv88e6xxx_get_temp_limit,
+ .set_temp_limit = mv88e6xxx_set_temp_limit,
+ .get_temp_alarm = mv88e6xxx_get_temp_alarm,
#endif
.get_eeprom = mv88e6352_get_eeprom,
.set_eeprom = mv88e6352_set_eeprom,
@@ -389,10 +343,13 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
.port_join_bridge = mv88e6xxx_join_bridge,
.port_leave_bridge = mv88e6xxx_leave_bridge,
.port_stp_update = mv88e6xxx_port_stp_update,
- .fdb_add = mv88e6xxx_port_fdb_add,
- .fdb_del = mv88e6xxx_port_fdb_del,
- .fdb_getnext = mv88e6xxx_port_fdb_getnext,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_getnext = mv88e6xxx_port_fdb_getnext,
};
-MODULE_ALIAS("platform:mv88e6352");
MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6176");
+MODULE_ALIAS("platform:mv88e6320");
+MODULE_ALIAS("platform:mv88e6321");
+MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 561342466076..9978245474a7 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -517,6 +517,18 @@ static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
return false;
}
+static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6320:
+ case PORT_SWITCH_ID_6321:
+ return true;
+ }
+ return false;
+}
+
static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -565,7 +577,7 @@ static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
int ret;
- if (mv88e6xxx_6352_family(ds))
+ if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
@@ -796,54 +808,6 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
}
}
-#ifdef CONFIG_NET_DSA_HWMON
-
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
- int val;
-
- *temp = 0;
-
- mutex_lock(&ps->smi_mutex);
-
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
- if (ret < 0)
- goto error;
-
- /* Enable temperature sensor */
- ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
- if (ret < 0)
- goto error;
-
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
- if (ret < 0)
- goto error;
-
- /* Wait for temperature to stabilize */
- usleep_range(10000, 12000);
-
- val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
- if (val < 0) {
- ret = val;
- goto error;
- }
-
- /* Disable temperature sensor */
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
- if (ret < 0)
- goto error;
-
- *temp = ((val & 0x1f) - 5) * 5;
-
-error:
- _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
/* Must be called with SMI lock held */
static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
u16 mask)
@@ -1000,7 +964,7 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
if (ret < 0)
return ret;
@@ -1127,7 +1091,7 @@ int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
ps->bridge_mask[fid] = br_port_mask;
if (fid != ps->fid[port]) {
- ps->fid_mask |= 1 << ps->fid[port];
+ clear_bit(ps->fid[port], ps->fid_bitmap);
ps->fid[port] = fid;
ret = _mv88e6xxx_update_bridge_config(ds, fid);
}
@@ -1161,9 +1125,16 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
mutex_lock(&ps->smi_mutex);
- newfid = __ffs(ps->fid_mask);
+ newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
+ if (unlikely(newfid > ps->num_ports)) {
+		netdev_err(ds->ports[port], "the first %d FIDs are all in use\n",
+ ps->num_ports);
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
ps->fid[port] = newfid;
- ps->fid_mask &= ~(1 << newfid);
+ set_bit(newfid, ps->fid_bitmap);
ps->bridge_mask[fid] &= ~(1 << port);
ps->bridge_mask[newfid] = 1 << port;
@@ -1171,6 +1142,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
if (!ret)
ret = _mv88e6xxx_update_bridge_config(ds, newfid);
+unlock:
mutex_unlock(&ps->smi_mutex);
return ret;
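Replacing the 16-bit fid_mask with a bitmap lets the FID space grow toward
VLAN_N_VID while keeping allocation cheap; a minimal sketch of the allocation
pattern used above, assuming ps->fid_bitmap is a
DECLARE_BITMAP(fid_bitmap, VLAN_N_VID):

    /* Search from FID 1 (FID 0 stays reserved), claim the first free
     * entry, and fail once the first num_ports FIDs are all in use.
     */
    static int fid_alloc(unsigned long *fid_bitmap, int num_ports)
    {
            int fid = find_next_zero_bit(fid_bitmap, VLAN_N_VID, 1);

            if (fid > num_ports)
                    return -ENOSPC;
            set_bit(fid, fid_bitmap);
            return fid;
    }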
@@ -1210,8 +1182,8 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
return 0;
}
-static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
- const unsigned char *addr)
+static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+ const unsigned char *addr)
{
int i, ret;
@@ -1226,7 +1198,7 @@ static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
return 0;
}
-static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
{
int i, ret;
@@ -1242,29 +1214,74 @@ static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
return 0;
}
-static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
- const unsigned char *addr, int state)
+static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+ struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u8 fid = ps->fid[port];
+ u16 reg = 0;
int ret;
ret = _mv88e6xxx_atu_wait(ds);
if (ret < 0)
return ret;
- ret = __mv88e6xxx_write_addr(ds, addr);
+ ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
- (0x10 << port) | state);
- if (ret)
+ if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ unsigned int mask, shift;
+
+ if (entry->trunk) {
+ reg |= GLOBAL_ATU_DATA_TRUNK;
+ mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+ shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+ } else {
+ mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+ shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+ }
+
+ reg |= (entry->portv_trunkid << shift) & mask;
+ }
+
+ reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
+ if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
+ return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+}
+
+static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- return ret;
+ if (vid == 0)
+ return ps->fid[port];
+
+ return -ENOENT;
+}
+
+static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry = { 0 };
+ int ret;
+
+ ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
+ if (ret < 0)
+ return ret;
+
+ entry.fid = ret;
+ entry.state = state;
+ ether_addr_copy(entry.mac, addr);
+ if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ entry.trunk = false;
+ entry.portv_trunkid = BIT(port);
+ }
+
+ return _mv88e6xxx_atu_load(ds, &entry);
}
int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
@@ -1277,7 +1294,7 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
+ ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -1290,61 +1307,99 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+ ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
GLOBAL_ATU_DATA_STATE_UNUSED);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static)
+static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+ const unsigned char *addr,
+ struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u8 fid = ps->fid[port];
- int ret, state;
+ struct mv88e6xxx_atu_entry next = { 0 };
+ int ret;
+
+ next.fid = fid;
ret = _mv88e6xxx_atu_wait(ds);
if (ret < 0)
return ret;
- ret = __mv88e6xxx_write_addr(ds, addr);
+ ret = _mv88e6xxx_atu_mac_write(ds, addr);
if (ret < 0)
return ret;
- do {
- ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (ret < 0)
- return ret;
+ ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (ret < 0)
+ return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
- if (ret < 0)
- return ret;
- state = ret & GLOBAL_ATU_DATA_STATE_MASK;
- if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
- return -ENOENT;
- } while (!(((ret >> 4) & 0xff) & (1 << port)));
+ ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
+ if (ret < 0)
+ return ret;
- ret = __mv88e6xxx_read_addr(ds, addr);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
if (ret < 0)
return ret;
- *is_static = state == (is_multicast_ether_addr(addr) ?
- GLOBAL_ATU_DATA_STATE_MC_STATIC :
- GLOBAL_ATU_DATA_STATE_UC_STATIC);
+ next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+ if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ unsigned int mask, shift;
+
+ if (ret & GLOBAL_ATU_DATA_TRUNK) {
+ next.trunk = true;
+ mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+ shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+ } else {
+ next.trunk = false;
+ mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+ shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+ }
+ next.portv_trunkid = (ret & mask) >> shift;
+ }
+
+ *entry = next;
return 0;
}
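Aside (illustrative sketch, not part of the patch): the hunks above pack and
unpack the GLOBAL_ATU_DATA word as bit 15 = trunk flag, bits 7:4 = trunk ID
(or bits 13:4 = port vector for non-trunk entries) and bits 3:0 = entry
state. A minimal decode using the mask/shift constants this patch adds to
mv88e6xxx.h, with a hypothetical register value:

	u16 data = 0x80b4;	/* hypothetical read: a trunk entry */
	bool trunk = data & GLOBAL_ATU_DATA_TRUNK;
	u16 portv_trunkid = trunk ?
		(data & GLOBAL_ATU_DATA_TRUNK_ID_MASK) >> GLOBAL_ATU_DATA_TRUNK_ID_SHIFT :
		(data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >> GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
	u8 state = data & GLOBAL_ATU_DATA_STATE_MASK;
	/* yields trunk = true, portv_trunkid = 11, state = 0x4 */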
/* get next entry for port */
int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static)
+ unsigned char *addr, u16 *vid, bool *is_static)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_atu_entry next;
+ u16 fid;
int ret;
mutex_lock(&ps->smi_mutex);
- ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
+
+ ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
+ if (ret < 0)
+ goto unlock;
+ fid = ret;
+
+ do {
+ if (is_broadcast_ether_addr(addr)) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
+ if (ret < 0)
+ goto unlock;
+
+ ether_addr_copy(addr, next.mac);
+
+ if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ continue;
+ } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+
+ *is_static = next.state == (is_multicast_ether_addr(addr) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC);
+unlock:
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -1377,7 +1432,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6065_family(ds)) {
+ mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
/* MAC Forcing register: don't force link, speed,
* duplex or flow control state to any particular
* values on physical ports, but force the CPU port
@@ -1423,7 +1478,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds))
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
reg = PORT_CONTROL_IGMP_MLD_SNOOP |
PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
PORT_CONTROL_STATE_FORWARDING;
@@ -1431,7 +1486,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
reg |= PORT_CONTROL_DSA_TAG;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
else
@@ -1441,14 +1497,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds)) {
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_EGRESS_ADD_TAG;
}
}
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
if (ds->dsa_port_mask & (1 << port))
reg |= PORT_CONTROL_FRAME_MODE_DSA;
if (port == dsa_upstream_port(ds))
@@ -1473,11 +1530,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg = 0;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds))
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
reg = PORT_CONTROL_2_MAP_DA;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds))
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
reg |= PORT_CONTROL_2_JUMBO_10240;
if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
@@ -1514,7 +1571,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
goto abort;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Do not limit the period of time that this port can
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
@@ -1564,7 +1622,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Rate Control: disable ingress rate limiting. */
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
PORT_RATE_CONTROL, 0x0001);
@@ -1584,9 +1643,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* ports, and allow each of the 'real' ports to only talk to
* the upstream port.
*/
- fid = __ffs(ps->fid_mask);
+ fid = port + 1;
ps->fid[port] = fid;
- ps->fid_mask &= ~(1 << fid);
+ set_bit(fid, ps->fid_bitmap);
if (!dsa_is_cpu_port(ds, port))
ps->bridge_mask[fid] = 1 << port;
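Illustrative note, not part of the patch: each port now simply owns
FID = port + 1, so after setup of a 7-port switch:

	/* ps->fid[] == {1, 2, 3, 4, 5, 6, 7}; bits 1..7 set in ps->fid_bitmap.
	 * FID 0 is never handed out, which is why _mv88e6xxx_port_vid_to_fid()
	 * above can treat vid 0 as "use the port's default FID".
	 */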
@@ -1683,7 +1742,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
unsigned char addr[6];
int ret, data, state;
- ret = __mv88e6xxx_write_addr(ds, bcast);
+ ret = _mv88e6xxx_atu_mac_write(ds, bcast);
if (ret < 0)
return ret;
@@ -1698,7 +1757,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
state = data & GLOBAL_ATU_DATA_STATE_MASK;
if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
break;
- ret = __mv88e6xxx_read_addr(ds, addr);
+ ret = _mv88e6xxx_atu_mac_read(ds, addr);
if (ret < 0)
return ret;
mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
@@ -1885,8 +1944,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
- ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
-
INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
@@ -1913,6 +1970,7 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
int mv88e6xxx_setup_global(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
int i;
/* Set the default address aging time to 5 minutes, and
@@ -1976,7 +2034,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
(i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:2x to the CPU port.
*/
@@ -1995,7 +2054,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Disable ingress rate limiting by resetting all
* ingress rate limit registers to their initial
* state.
@@ -2009,9 +2069,11 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
/* Wait for the flush to complete. */
- _mv88e6xxx_stats_wait(ds);
+ mutex_lock(&ps->smi_mutex);
+ ret = _mv88e6xxx_stats_wait(ds);
+ mutex_unlock(&ps->smi_mutex);
- return 0;
+ return ret;
}
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
@@ -2162,6 +2224,132 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
return ret;
}
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int val;
+
+ *temp = 0;
+
+ mutex_lock(&ps->smi_mutex);
+
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+ if (ret < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (ret < 0)
+ goto error;
+
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (val < 0) {
+ ret = val;
+ goto error;
+ }
+
+ /* Disable temperature sensor */
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
+ if (ret < 0)
+ goto error;
+
+ *temp = ((val & 0x1f) - 5) * 5;
+
+error:
+ _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+ mutex_unlock(&ps->smi_mutex);
+ return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ *temp = 0;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+ if (ret < 0)
+ return ret;
+
+ *temp = (ret & 0xff) - 25;
+
+ return 0;
+}
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+ return mv88e63xx_get_temp(ds, temp);
+
+ return mv88e61xx_get_temp(ds, temp);
+}
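For reference (sketch, not part of the patch), the two decodes above map raw
register reads to degrees Celsius as follows, using hypothetical raw values:

	/* 6352/6320 families, phy page 6 reg 27: temp = (raw & 0xff) - 25 */
	int raw_63xx = 0x48;				/* 72 -> 47 C */
	int temp_63xx = (raw_63xx & 0xff) - 25;

	/* 61xx path, phy reg 0x1a bits 4:0: temp = ((raw & 0x1f) - 5) * 5 */
	int raw_61xx = 0x0f;				/* 15 -> 50 C */
	int temp_61xx = ((raw_61xx & 0x1f) - 5) * 5;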
+
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ return -EOPNOTSUPP;
+
+ *temp = 0;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+ return 0;
+}
+
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ return -EOPNOTSUPP;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+ temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+ return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
+ (ret & 0xe0ff) | (temp << 8));
+}
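The limit field is kept in 5-degree steps with a -25 degree offset; a quick
round trip of the encoding used above (sketch, non-negative limit assumed):

	int temp = 75;						/* requested limit in C */
	int field = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);	/* 20 */
	int readback = (field * 5) - 25;			/* reads back as 75 C */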
+
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ return -EOPNOTSUPP;
+
+ *alarm = false;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *alarm = !!(ret & 0x40);
+
+ return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index a650b2656de9..10fae325671e 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -11,6 +11,8 @@
#ifndef __MV88E6XXX_H
#define __MV88E6XXX_H
+#include <linux/if_vlan.h>
+
#ifndef UINT64_MAX
#define UINT64_MAX (u64)(~((u64)0))
#endif
@@ -89,7 +91,12 @@
#define PORT_SWITCH_ID_6182 0x1a60
#define PORT_SWITCH_ID_6185 0x1a70
#define PORT_SWITCH_ID_6240 0x2400
-#define PORT_SWITCH_ID_6320 0x1250
+#define PORT_SWITCH_ID_6320 0x1150
+#define PORT_SWITCH_ID_6320_A1 0x1151
+#define PORT_SWITCH_ID_6320_A2 0x1152
+#define PORT_SWITCH_ID_6321 0x3100
+#define PORT_SWITCH_ID_6321_A1 0x3101
+#define PORT_SWITCH_ID_6321_A2 0x3102
#define PORT_SWITCH_ID_6350 0x3710
#define PORT_SWITCH_ID_6351 0x3750
#define PORT_SWITCH_ID_6352 0x3520
@@ -164,6 +171,7 @@
#define GLOBAL_MAC_01 0x01
#define GLOBAL_MAC_23 0x02
#define GLOBAL_MAC_45 0x03
+#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */
#define GLOBAL_CONTROL 0x04
#define GLOBAL_CONTROL_SW_RESET BIT(15)
#define GLOBAL_CONTROL_PPU_ENABLE BIT(14)
@@ -198,6 +206,8 @@
#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_DATA 0x0c
#define GLOBAL_ATU_DATA_TRUNK BIT(15)
+#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0
+#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4
#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0
#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
@@ -280,8 +290,12 @@
#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3)
#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0
#define GLOBAL2_EEPROM_OP 0x14
-#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
-#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
+#define GLOBAL2_EEPROM_OP_WRITE ((3 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_READ ((4 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_OP_WRITE_EN BIT(10)
+#define GLOBAL2_EEPROM_OP_ADDR_MASK 0xff
#define GLOBAL2_EEPROM_DATA 0x15
#define GLOBAL2_PTP_AVB_OP 0x16
#define GLOBAL2_PTP_AVB_DATA 0x17
@@ -304,6 +318,14 @@
#define GLOBAL2_QOS_WEIGHT 0x1c
#define GLOBAL2_MISC 0x1d
+struct mv88e6xxx_atu_entry {
+ u16 fid;
+ u8 state;
+ bool trunk;
+ u16 portv_trunkid;
+ u8 mac[ETH_ALEN];
+};
+
struct mv88e6xxx_priv_state {
/* When using multi-chip addressing, this mutex protects
* access to the indirect access registers. (In single-chip
@@ -342,9 +364,9 @@ struct mv88e6xxx_priv_state {
/* hw bridging */
- u32 fid_mask;
- u8 fid[DSA_MAX_PORTS];
- u16 bridge_mask[DSA_MAX_PORTS];
+ DECLARE_BITMAP(fid_bitmap, VLAN_N_VID); /* FIDs 1 to 4095 available */
+ u16 fid[DSA_MAX_PORTS]; /* per (non-bridged) port FID */
+ u16 bridge_mask[DSA_MAX_PORTS]; /* br groups (indexed by FID) */
unsigned long port_state_update_mask;
u8 port_state[DSA_MAX_PORTS];
@@ -389,7 +411,10 @@ int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p);
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
@@ -406,10 +431,11 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static);
+ unsigned char *addr, u16 *vid, bool *is_static);
int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int reg, int val);
+
extern struct dsa_switch_driver mv88e6131_switch_driver;
extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
extern struct dsa_switch_driver mv88e6352_switch_driver;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f3bb1784066b..05aa7597dab9 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -167,6 +167,7 @@ source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c51014b0464f..f42177b11723 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 580553d42d34..88ef67a998b4 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -71,8 +71,6 @@ int sgdma_initialize(struct altera_tse_private *priv)
SGDMA_CTRLREG_INTEN |
SGDMA_CTRLREG_ILASTD;
- priv->sgdmadesclen = sizeof(struct sgdma_descrip);
-
INIT_LIST_HEAD(&priv->txlisthd);
INIT_LIST_HEAD(&priv->rxlisthd);
@@ -254,7 +252,7 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
unsigned int pktstatus = 0;
dma_sync_single_for_cpu(priv->device,
priv->rxdescphys,
- priv->sgdmadesclen,
+ SGDMA_DESC_LEN,
DMA_FROM_DEVICE);
pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
@@ -374,7 +372,7 @@ static int sgdma_async_read(struct altera_tse_private *priv)
dma_sync_single_for_device(priv->device,
priv->rxdescphys,
- priv->sgdmadesclen,
+ SGDMA_DESC_LEN,
DMA_TO_DEVICE);
csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
@@ -402,7 +400,7 @@ static int sgdma_async_write(struct altera_tse_private *priv,
csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
dma_sync_single_for_device(priv->device, priv->txdescphys,
- priv->sgdmadesclen, DMA_TO_DEVICE);
+ SGDMA_DESC_LEN, DMA_TO_DEVICE);
csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
priv->tx_dma_csr,
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index 85bc33b218d9..bbd52f02330b 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -50,6 +50,7 @@ struct sgdma_descrip {
u8 control;
} __packed;
+#define SGDMA_DESC_LEN sizeof(struct sgdma_descrip)
#define SGDMA_STATUS_ERR BIT(0)
#define SGDMA_STATUS_LENGTH_ERR BIT(1)
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 2adb24d4523c..103c30ddddf7 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -458,7 +458,6 @@ struct altera_tse_private {
u32 rxctrlreg;
dma_addr_t rxdescphys;
dma_addr_t txdescphys;
- size_t sgdmadesclen;
struct list_head txlisthd;
struct list_head rxlisthd;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 932bd1862f7a..2795d6db10e1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -874,6 +874,8 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
+
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
ring_count);
@@ -1551,6 +1553,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 hw_next_to_clean;
u16 reg;
+ unsigned int total_bytes = 0, total_packets = 0;
reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
@@ -1558,12 +1561,18 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
+ if (buffer_info->skb) {
+ total_bytes += buffer_info->skb->len;
+ total_packets++;
+ }
atl1c_clean_buffer(pdev, buffer_info);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
}
+ netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+
if (netif_queue_stopped(adapter->netdev) &&
netif_carrier_ok(adapter->netdev)) {
netif_wake_queue(adapter->netdev);
@@ -2256,6 +2265,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
} else {
+ netdev_sent_queue(adapter->netdev, skb->len);
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4566cdf0bc39..b9a5a97ed4dd 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -933,6 +933,21 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ disable_irq(priv->irq0);
+ bcm_sysport_rx_isr(priv->irq0, priv);
+ enable_irq(priv->irq0);
+
+ disable_irq(priv->irq1);
+ bcm_sysport_tx_isr(priv->irq1, priv);
+ enable_irq(priv->irq1);
+}
+#endif
+
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1723,6 +1738,9 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_set_features = bcm_sysport_set_features,
.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
.ndo_set_mac_address = bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bcm_sysport_poll_controller,
+#endif
};
#define REV_FMT "v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index cd4ae76bbff2..5762c485ea06 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,8 @@
-/* bnx2x.h: Broadcom Everest network driver.
+/* bnx2x.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,7 +32,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.710.51-0"
+#define DRV_MODULE_VERSION "1.712.30-0"
#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
@@ -1227,6 +1229,10 @@ struct bnx2x_slowpath {
} mac_rdata;
union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
struct tstorm_eth_mac_filter_config e1x;
struct eth_filter_rules_ramrod_data e2;
} rx_mode_rdata;
@@ -1408,6 +1414,9 @@ struct bnx2x_sp_objs {
/* Queue State object */
struct bnx2x_queue_sp_obj q_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
};
struct bnx2x_fp_stats {
@@ -1422,6 +1431,13 @@ enum {
SUB_MF_MODE_UNKNOWN = 0,
SUB_MF_MODE_UFP,
SUB_MF_MODE_NPAR1_DOT_5,
+ SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+ struct list_head link;
+ u16 vid;
+ bool hw;
};
struct bnx2x {
@@ -1636,6 +1652,8 @@ struct bnx2x {
u8 mf_sub_mode;
#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \
bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp) (IS_MF_SD(bp) && \
+ bp->mf_sub_mode == SUB_MF_MODE_BD)
u8 wol;
@@ -1860,8 +1878,6 @@ struct bnx2x {
int dcb_version;
/* CAM credit pools */
-
- /* used only in sriov */
struct bnx2x_credit_pool_obj vlans_pool;
struct bnx2x_credit_pool_obj macs_pool;
@@ -1924,6 +1940,11 @@ struct bnx2x {
u16 rx_filter;
struct bnx2x_link_report_data vf_link_vars;
+ struct list_head vlan_reg;
+ u16 vlan_cnt;
+ u16 vlan_credit;
+ u16 vxlan_dst_port;
+ bool accept_any_vlan;
};
/* Tx queues may be less or equal to Rx queues */
@@ -1951,23 +1972,14 @@ extern int num_queues;
#define RSS_IPV6_TCP_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
-/* func init flags */
-#define FUNC_FLG_RSS 0x0001
-#define FUNC_FLG_STATS 0x0002
-/* removed FUNC_FLG_UNMATCHED 0x0004 */
-#define FUNC_FLG_TPA 0x0008
-#define FUNC_FLG_SPQ 0x0010
-#define FUNC_FLG_LEADING 0x0020 /* PF only */
-#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
- dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
- dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
+ bool spq_active;
+ dma_addr_t spq_map;
+ u16 spq_prod;
- u16 func_flgs;
u16 func_id; /* abs fid */
u16 pf_id;
- u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
#define for_each_cnic_queue(bp, var) \
@@ -2077,6 +2089,11 @@ struct bnx2x_func_init_params {
int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags);
+
/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
@@ -2481,6 +2498,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define VF_ACQUIRE_THRESH 3
#define VF_ACQUIRE_MAC_FILTERS 1
#define VF_ACQUIRE_MC_FILTERS 10
+#define VF_ACQUIRE_VLAN_FILTERS 2 /* VLAN0 + 'real' VLAN */
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
@@ -2577,6 +2595,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
void bnx2x_update_mng_version(struct bnx2x *bp);
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
@@ -2589,4 +2609,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
#define BNX2X_MAX_PHC_DRIFT 31000000
#define BNX2X_PTP_TX_TIMEOUT
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f7fbdc9d1325..a2bb1f0934d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (rss_obj->udp_rss_v6)
__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
+ /* valid only for TUNN_MODE_VXLAN tunnel mode */
+ __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
/* valid only for TUNN_MODE_GRE tunnel mode */
- __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+ __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+ }
} else {
__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
}
@@ -2510,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->mode = TPA_MODE_DISABLED;
}
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+ u32 cur;
+
+ if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+ return;
+
+ cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+ DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+ cur, state);
+
+ SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
int bnx2x_load_cnic(struct bnx2x *bp)
{
int i, rc, port = BP_PORT(bp);
@@ -2827,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */
+ /* Re-configure vlan filters */
+ rc = bnx2x_vlan_reconfigure_vid(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+
/* Initialize Rx filter. */
bnx2x_set_rx_mode_inner(bp);
@@ -2873,6 +2899,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* mark driver is loaded in shmem2 */
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ val &= ~DRV_FLAGS_MTU_MASK;
+ val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
DRV_FLAGS_CAPABILITIES_LOADED_L2);
@@ -2885,10 +2913,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
+ /* Update driver data for On-Chip MFW dump. */
+ if (IS_PF(bp))
+ bnx2x_update_mfw_dump(bp);
+
/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
bnx2x_dcbx_init(bp, false);
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
return 0;
@@ -2956,6 +2991,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
/* mark driver is unloaded in shmem2 */
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
@@ -3677,7 +3715,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
pbd2->fw_ip_hdr_to_payload_w =
hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
pbd_e2->data.tunnel_data.flags |=
- ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+ ETH_TUNNEL_DATA_IPV6_OUTER;
}
pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4184,6 +4222,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+ int mfw_vn = BP_FW_MB_IDX(bp);
+ u32 tmp;
+
+ /* If the shmem shouldn't affect the configuration, use an identity map */
+ if (!IS_MF_BD(bp)) {
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+ c2s_map[i] = i;
+ *c2s_default = 0;
+
+ return;
+ }
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[0] = tmp & 0xff;
+ c2s_map[1] = (tmp >> 8) & 0xff;
+ c2s_map[2] = (tmp >> 16) & 0xff;
+ c2s_map[3] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[4] = tmp & 0xff;
+ c2s_map[5] = (tmp >> 8) & 0xff;
+ c2s_map[6] = (tmp >> 16) & 0xff;
+ c2s_map[7] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
+
/**
* bnx2x_setup_tc - routine to configure net_device for multi tc
*
@@ -4194,8 +4267,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
- int cos, prio, count, offset;
struct bnx2x *bp = netdev_priv(dev);
+ u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+ int cos, prio, count, offset;
/* setup tc must be called under rtnl lock */
ASSERT_RTNL();
@@ -4219,12 +4293,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return -EINVAL;
}
+ bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
/* configure priority to traffic class mapping */
for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
- netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+ int outer_prio = c2s_map[prio];
+
+ netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping priority %d to tc %d\n",
- prio, bp->prio_to_cos[prio]);
+ outer_prio, bp->prio_to_cos[outer_prio]);
}
/* Use this configuration to differentiate tc0 from other COSes
@@ -4278,6 +4356,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
rc = bnx2x_set_eth_mac(bp, true);
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return rc;
}
@@ -4831,6 +4912,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return bnx2x_reload_if_running(dev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 03b7404d5b9b..e18a0e4d3ed1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.h: Broadcom Everest network driver.
+/* bnx2x_cmn.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
*/
void bnx2x_tx_timeout(struct net_device *dev);
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ *
+ * @bp: driver handle
+ * @c2s_map: caller-provided array of BNX2X_MAX_PRIORITY entries
+ * @c2s_default: entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -931,14 +941,33 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
start_params->mf_mode = bp->mf_mode;
start_params->sd_vlan_tag = bp->mf_ov;
+ /* Configure Ethertype for BD mode */
+ if (IS_MF_BD(bp)) {
+ DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+ start_params->sd_vlan_eth_type = ETH_P_8021AD;
+ REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+ bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+ &start_params->c2s_pri_default);
+ start_params->c2s_pri_valid = 1;
+
+ DP(NETIF_MSG_IFUP,
+ "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+ start_params->c2s_pri[0], start_params->c2s_pri[1],
+ start_params->c2s_pri[2], start_params->c2s_pri[3],
+ start_params->c2s_pri[4], start_params->c2s_pri[5],
+ start_params->c2s_pri[6], start_params->c2s_pri[7],
+ start_params->c2s_pri_default);
+ }
+
if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
start_params->network_cos_mode = STATIC_COS;
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->tunnel_mode = TUNN_MODE_GRE;
- start_params->gre_tunnel_type = IPGRE_TUNNEL;
- start_params->inner_gre_rss_en = 1;
+ start_params->inner_rss = 1;
if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
start_params->class_fail_ethtype = ETH_P_FIP;
@@ -1037,6 +1066,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
&bp->macs_pool);
+
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+ fp->cl_id, fp->cid, BP_FUNC(bp),
+ bnx2x_sp(bp, vlan_rdata),
+ bnx2x_sp_mapping(bp, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &bp->sp_state, obj_type,
+ &bp->vlans_pool);
}
/**
@@ -1096,7 +1134,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
- bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
/* RSS configuration object */
@@ -1106,6 +1144,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_sp_mapping(bp, rss_rdata),
BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
BNX2X_OBJ_TYPE_RX);
+
+ bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
}
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
@@ -1339,4 +1379,23 @@ void bnx2x_squeeze_objects(struct bnx2x *bp);
void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
u32 verbose);
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp: driver handle
+ * @state: OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp: driver handle
+ * @offset: byte offset in nvram
+ * @ret_buf: pointer to buffer where data is to be stored
+ * @buf_size: Length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 6e4294ed1fc9..7ccf6684e0a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.c: Broadcom Everest network driver.
+/* bnx2x_dcb.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
if (bp->dcbx_port_params.ets.cos_params[cos].
pri_bitmask & pri_bit)
tt2cos[pri].cos = cos;
+
+ pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri];
}
/* we never want the FW to add a 0 vlan tag */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index c6939ecb02c5..9a9517c0f703 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.h: Broadcom Everest network driver.
+/* bnx2x_dcb.h: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index 741aa130c19f..eccfa13b0f2d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,15 +1,17 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
+/* bnx2x_dump.h: QLogic Everest network driver.
*
* Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 5907c821d131..aeb7ce64452e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,8 @@
-/* bnx2x_ethtool.c: Broadcom Everest network driver.
+/* bnx2x_ethtool.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1129,6 +1131,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
} else
bp->wol = 0;
+ if (SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return 0;
}
@@ -1343,8 +1348,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
return rc;
}
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
- int buf_size)
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size)
{
int rc;
u32 cmd_flags;
@@ -3578,17 +3583,8 @@ static int bnx2x_get_ts_info(struct net_device *dev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 7636e3c18771..226ab29f4cb6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,8 @@
-/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -372,7 +374,7 @@
#define MAX_COS_NUMBER 4
#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
/* used by array traffic_type_to_priority[] to mark a traffic type
 * that is not mapped to any priority */
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 8aafd9b5d6a2..9e3b5a1e9f4f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,8 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 058bc7328220..cafd5de675cf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,8 @@
-/* bnx2x_hsi.h: Broadcom Everest network driver.
+/* bnx2x_hsi.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -729,6 +731,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858 0x00001200
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
@@ -786,6 +789,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 0x00001200
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
@@ -864,6 +868,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500
#define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600
#define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700
@@ -2064,6 +2069,45 @@ struct ncsi_oem_fcoe_features {
#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
};
+enum curr_cfg_method_e {
+ CURR_CFG_MET_NONE = 0, /* default config */
+ CURR_CFG_MET_OS = 1,
+ CURR_CFG_MET_VENDOR_SPEC = 2, /* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+ struct bdn_fc_npiv_cfg fc_npiv_cfg;
+ struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct mdump_driver_info {
+ u32 epoc;
+ u32 drv_ver;
+ u32 fw_ver;
+
+ u32 valid_dump;
+ #define FIRST_DUMP_VALID (1 << 0)
+ #define SECOND_DUMP_VALID (1 << 1)
+
+ u32 flags;
+ #define ENABLE_ALL_TRIGGERS (0x7fffffff)
+ #define TRIGGER_MDUMP_ONCE (1 << 31)
+};
+
struct ncsi_oem_data {
u32 driver_version[4];
struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
@@ -2187,6 +2231,8 @@ struct shmem2_region {
#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+#define DRV_FLAGS_MTU_MASK 0xffff0000
+#define DRV_FLAGS_MTU_SHIFT 16
u32 extended_dev_info_shared_cfg_size;
@@ -2251,6 +2297,7 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_ATTR_84858 0x00000002
#define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
#define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
#define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
@@ -2268,6 +2315,74 @@ struct shmem2_region {
/* We use indication for each PF (0..3) */
#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+ union { /* For various OEMs */ /* Offset 0x1a0 */
+ u8 storage_boot_prog[E2_FUNC_MAX];
+ #define STORAGE_BOOT_PROG_MASK 0x000000FF
+ #define STORAGE_BOOT_PROG_NONE 0x00000000
+ #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002
+ #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002
+ #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004
+ #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008
+ #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008
+ #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010
+ #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020
+ #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040
+ #define STORAGE_BOOT_PROG_COMPLETED 0x00000080
+
+ u32 oem_i2c_data_addr;
+ };
+
+ /* 9 entries for the C2S PCP map: one for each inner VLAN PCP + 1 default */
+ /* For PCP values 0-3 use the map lower */
+ /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+ * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+ */
+ u32 c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */
+
+ /* For PCP values 4-7 use the map upper */
+ /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+ * 0x0000FF00 - PCP 6, 0x000000FF - PCP 7
+ */
+ u32 c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */
+
+ /* For PCP default value get the MSB byte of the map default */
+ u32 c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */
+
+ /* FC_NPIV table offset in NVRAM */
+ u32 fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */
+
+ /* Shows last method that changed configuration of this device */
+ enum curr_cfg_method_e curr_cfg; /* 0x1dc */
+
+ /* Storm FW version, should be kept in the format 0xMMmmbbdd:
+ * MM - Major, mm - Minor, bb - Build, dd - Drop
+ */
+ u32 netproc_fw_ver; /* 0x1e0 */
+
+ /* Option ROM SMASH CLP version */
+ u32 clp_ver; /* 0x1e4 */
+
+ u32 pcie_bus_num; /* 0x1e8 */
+
+ u32 sriov_switch_mode; /* 0x1ec */
+ #define SRIOV_SWITCH_MODE_NONE 0x0
+ #define SRIOV_SWITCH_MODE_VEB 0x1
+ #define SRIOV_SWITCH_MODE_VEPA 0x2
+
+ u8 rsrv2[E2_FUNC_MAX]; /* 0x1f0 */
+
+ u32 img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */
+
+ u32 mtu_size[E2_FUNC_MAX]; /* 0x1f8 */
+
+ u32 os_driver_state[E2_FUNC_MAX]; /* 0x208 */
+ #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */
+ #define OS_DRIVER_STATE_LOADING 1 /* transition state */
+ #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */
+ #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */
+
+ /* mini dump driver info */
+ struct mdump_driver_info drv_info; /* 0x218 */
};
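A short illustration (sketch, not part of this patch) of how the new
c2s_pcp_map_* words are consumed: each 32-bit word packs one outer-PCP byte
per inner PCP, most-significant byte first, which is why
bnx2x_get_c2s_mapping() earlier in this series byte-swaps before unpacking:

	u32 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);	/* undo MSB-first packing */
	u8 outer_pcp0 = tmp & 0xff;		/* the 0xFF000000 byte of the map */
	u8 outer_pcp1 = (tmp >> 8) & 0xff;	/* the 0x00FF0000 byte of the map */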
@@ -2898,8 +3013,8 @@ struct afex_stats {
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 10
-#define BCM_5710_FW_REVISION_VERSION 51
+#define BCM_5710_FW_MINOR_VERSION 12
+#define BCM_5710_FW_REVISION_VERSION 30
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3901,7 +4016,11 @@ struct eth_fast_path_rx_cqe {
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
- __le32 reserved1[7];
+ u8 tunn_type;
+ u8 tunn_inner_hdrs_offset;
+ __le16 reserved1;
+ __le32 tunn_tenant_id;
+ __le32 padding[5];
u32 marker;
};
@@ -4012,8 +4131,8 @@ struct eth_tunnel_data {
__le16 pseudo_csum;
u8 ip_hdr_start_inner_w;
u8 flags;
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
};
@@ -4120,16 +4239,12 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
u8 rss_result_mask;
u8 reserved3;
__le16 reserved4;
@@ -4314,6 +4429,18 @@ enum eth_tunnel_non_lso_csum_location {
MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
};
+enum eth_tunn_type {
+ TUNN_TYPE_NONE,
+ TUNN_TYPE_VXLAN,
+ TUNN_TYPE_L2_GRE,
+ TUNN_TYPE_IPV4_GRE,
+ TUNN_TYPE_IPV6_GRE,
+ TUNN_TYPE_L2_GENEVE,
+ TUNN_TYPE_IPV4_GENEVE,
+ TUNN_TYPE_IPV6_GENEVE,
+ MAX_ETH_TUNN_TYPE
+};
+
/*
* Tx regular BD structure
*/
@@ -4758,6 +4885,9 @@ struct afex_vif_list_ramrod_data {
__le16 reserved1;
};
+struct c2s_pri_trans_table_entry {
+ u8 val[MAX_VLAN_PRIORITIES];
+};
/*
* cfc delete event data
@@ -5246,6 +5376,7 @@ struct flow_control_configuration {
u8 dont_add_pri_0_en;
u8 reserved1;
__le32 reserved2;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
};
@@ -5260,18 +5391,25 @@ struct function_start_data {
u8 path_id;
u8 network_cos_mode;
u8 dmae_cmd_id;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
- u8 tunn_clss_en;
- u8 inner_gre_rss_en;
- u8 sd_accept_mf_clss_fail;
+ u8 no_added_tags;
+ __le16 reserved0;
+ __le32 reserved1;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
__le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
+ u8 sd_accept_mf_clss_fail;
+ u8 sd_accept_mf_clss_fail_match_ethtype;
__le16 sd_accept_mf_clss_fail_ethtype;
__le16 sd_vlan_eth_type;
u8 sd_vlan_force_pri_flg;
u8 sd_vlan_force_pri_val;
- u8 sd_accept_mf_clss_fail_match_ethtype;
- u8 no_added_tags;
+ u8 c2s_pri_tt_valid;
+ u8 c2s_pri_default;
+ u8 reserved2[6];
+ struct c2s_pri_trans_table_entry c2s_pri_trans_table;
};
struct function_update_data {
@@ -5289,11 +5427,12 @@ struct function_update_data {
u8 tx_switch_suspend;
u8 echo;
u8 update_tunn_cfg_flg;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
- u8 tunn_clss_en;
- u8 inner_gre_rss_en;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
__le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
u8 sd_vlan_force_pri_change_flg;
u8 sd_vlan_force_pri_flg;
u8 sd_vlan_force_pri_val;
@@ -5302,6 +5441,8 @@ struct function_update_data {
u8 reserved1;
__le16 sd_vlan_tag;
__le16 sd_vlan_eth_type;
+ __le16 reserved0;
+ __le32 reserved2;
};
/*
@@ -5330,15 +5471,6 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
-
-/* GRE Tunnel Mode */
-enum gre_tunnel_type {
- NVGRE_TUNNEL,
- L2GRE_TUNNEL,
- IPGRE_TUNNEL,
- MAX_GRE_TUNNEL_TYPE
-};
-
/*
* Dynamic Host-Coalescing - Driver(host) counters
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index d6e1975b7b69..46ee2c01f4c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,9 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
+/* bnx2x_init.h: QLogic Everest network driver.
* Structures and macros needed during the initialization.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 5669ed2e87d0..1835d2e451c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -1,8 +1,10 @@
-/* bnx2x_init_ops.h: Broadcom Everest network driver.
+/* bnx2x_init_ops.h: QLogic Everest network driver.
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a0b03c27e0a3..d946bba43726 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,13 +1,15 @@
/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Written by Yaniv Rosner
@@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8481/BCM84823/BCM84833 PHY SECTION */
/******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+ return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct bnx2x *bp,
u8 port)
@@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
};
u16 fw_ver1;
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (bnx2x_is_8483x_8485x(phy)) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
phy->ver_addr);
@@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+ if (bnx2x_is_8483x_8485x(phy))
offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
else
offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
@@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
switch (action) {
case PHY_INIT:
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (!bnx2x_is_8483x_8485x(phy)) {
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
}
@@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
/* Always write this if this is not 84833/4.
* For 84833/4, write it only when it's a forced speed.
*/
- if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+ if (!bnx2x_is_8483x_8485x(phy) ||
((autoneg_val & (1<<12)) == 0))
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
@@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc)
+{
+ int idx;
+ u16 val;
+ struct bnx2x *bp = params->bp;
+
+ /* Step 1: Poll the STATUS register to see whether the previous command
+ * is in progress or the system is busy (CMD_IN_PROGRESS or
+ * SYSTEM_BUSY). If the previous command is still running or the
+ * system is busy, keep polling until it finishes execution and the
+ * system is ready to accept a new command
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+ (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ /* Step 2: If any parameters are required for the function, write them
+ * to the required DATA registers
+ */
+
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
+
+ /* Step 3: When the firmware is ready for commands, write the 'Command
+ * code' to the CMD register
+ */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+ /* Step 4: Once the command has been written, poll the STATUS register
+ * to check whether the command has completed (CMD_COMPLETE_PASS or
+ * CMD_COMPLETE_ERROR).
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+ return -EINVAL;
+ }
+ /* Step 5: Once the command has completed, read the specified DATA
+ * registers for any results the command saved, if applicable
+ */
+
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
+
+ return 0;
+}
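
The five steps above are the generic 848xx firmware mailbox protocol: wait for idle, stage arguments in the DATA registers, write the opcode, poll for completion, read back results. As a minimal usage sketch — the helper below is hypothetical and not part of this patch, though PHY848xx_CMD_GET_EEE_MODE and the in-place cmd_args[] convention are real:

static int bnx2x_84858_query_eee_mode(struct bnx2x_phy *phy,
				      struct link_params *params,
				      u16 *eee_mode)
{
	u16 data[PHY848xx_CMDHDLR_MAX_ARGS] = {0};
	int rc;

	/* One argument in, one result out; results come back in-place */
	rc = bnx2x_84858_cmd_hdlr(phy, params, PHY848xx_CMD_GET_EEE_MODE,
				  data, 1);
	if (rc)
		return rc;	/* firmware busy or command failed */

	*eee_mode = data[0];	/* read back from the DATA registers */
	return 0;
}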
+
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params, u16 fw_cmd,
u16 cmd_args[], int argc)
@@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
+ MDIO_848xx_CMD_HDLR_STATUS,
PHY84833_STATUS_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
usleep_range(1000, 2000);
}
- if (idx >= PHY84833_CMDHDLR_WAIT) {
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
return -EINVAL;
}
@@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
/* Prepare argument(s) and issue command */
for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
}
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
usleep_range(1000, 2000);
}
- if ((idx >= PHY84833_CMDHDLR_WAIT) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
DP(NETIF_MSG_LINK, "FW cmd failed.\n");
return -EINVAL;
}
/* Gather returning data */
for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
}
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
+ MDIO_848xx_CMD_HDLR_STATUS,
PHY84833_STATUS_CMD_CLEAR_COMPLETE);
return 0;
}
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc)
+{
+ struct bnx2x *bp = params->bp;
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+ (REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port])) &
+ LINK_ATTR_84858)) {
+ return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ } else {
+ return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ }
+}
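
The dispatcher selects the 84858 handler either from the strap-configured phy->type or from the LINK_ATTR_84858 flag in shmem2, which bnx2x_848x3_config_init() latches after probing the PHY ID register (further down in this file). The same test, pulled out as a standalone predicate — a sketch for clarity, not part of the patch:

static bool bnx2x_phy_is_84858(struct bnx2x_phy *phy,
			       struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	/* A board strapped as 84833 that actually carries an 84858 is
	 * recognized via the link_attr_sync word shared with the MFW.
	 */
	u32 attrs = REG_RD(bp, params->shmem2_base +
			   offsetof(struct shmem2_region,
				    link_attr_sync[params->port]));

	return (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
	       (attrs & LINK_ATTR_84858);
}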
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 pair_swap;
- u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+ u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
int status;
struct bnx2x *bp = params->bp;
@@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
/* Only the second argument is used for this command */
data[1] = (u16)pair_swap;
- status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
+ status = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_PAIR_SWAP, data,
+ PHY848xx_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
/* Prevent Phy from working in EEE and advertising it */
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE disable failed.\n");
return rc;
@@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 cmd_args = 1;
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE enable failed.\n");
return rc;
@@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u8 port, initialize = 1;
u16 val;
u32 actual_phy_selection;
- u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+ u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
int rc = 0;
usleep_range(1000, 2000);
@@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Wait for GPHY to come out of reset */
msleep(50);
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (!bnx2x_is_8483x_8485x(phy)) {
/* BCM84823 requires that XGXS links up first @ 10G for normal
* behavior.
*/
@@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
vars->line_speed = temp;
}
+ /* Check if this is actually BCM84858 */
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ u16 hw_rev;
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+ if (hw_rev == BCM84858_PHY_ID) {
+ params->link_attr_sync |= LINK_ATTR_84858;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ }
+ }
+
+ /* Set the dual-media configuration according to the multi-phy configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
- bnx2x_84833_pair_swap_cfg(phy, params, vars);
+ if (bnx2x_is_8483x_8485x(phy)) {
+ bnx2x_848xx_pair_swap_cfg(phy, params, vars);
/* Keep AutogrEEEn disabled. */
cmd_args[0] = 0x0;
cmd_args[1] = 0x0;
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args,
- PHY84833_CMDHDLR_MAX_ARGS);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+ PHY848xx_CMDHDLR_MAX_ARGS);
if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
@@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (bnx2x_is_8483x_8485x(phy)) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read_and_write(bp, phy,
MDIO_CTL_DEVAD,
@@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+ if (bnx2x_is_8483x_8485x(phy))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = {
.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
+static const struct bnx2x_phy phy_84858 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
static const struct bnx2x_phy phy_54618se = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
.addr = 0xff,
@@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
*phy = phy_84834;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+ *phy = phy_84858;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
*phy = phy_54618se;
@@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
- (phy->ver_addr)) {
+ if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
/* Remove 100Mb link supported for BCM84833/4 when phy fw
* version lower than or equal to 1.39
*/
@@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
/* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d9cce4c3899b..b7d251108c19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,13 +1,15 @@
/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Written by Yaniv Rosner
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c27af12314ed..ad73a60de333 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,8 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
+/* bnx2x_main.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -81,11 +83,11 @@
#define TX_TIMEOUT (5*HZ)
static char version[] =
- "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+ "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II "
+MODULE_DESCRIPTION("QLogic "
"BCM57710/57711/57711E/"
"57712/57712_MF/57800/57800_MF/57810/57810_MF/"
"57840/57840_MF Driver");
@@ -163,27 +165,27 @@ enum bnx2x_board_type {
static struct {
char *name;
} board_info[] = {
- [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
- [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
- [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
- [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
- [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
- [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
- [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
- [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
- [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
+ [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57811_VF] = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -2916,7 +2918,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
- if (IS_MF_UFP(bp)) {
+ if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
int func = BP_ABS_FUNC(bp);
u32 val;
@@ -2943,16 +2945,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
bp->mf_ov);
goto fail;
+ } else {
+ DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+ bp->mf_ov);
}
-
- DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
-
- bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
-
- return;
+ } else {
+ goto fail;
}
- /* not supported by SW yet */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+ return;
fail:
bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
}
@@ -3065,7 +3067,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
storm_memset_func_en(bp, p->func_id, 1);
/* spq */
- if (p->func_flgs & FUNC_FLG_SPQ) {
+ if (p->spq_active) {
storm_memset_spq_addr(bp, p->spq_map, p->func_id);
REG_WR(bp, XSEM_REG_FAST_MEMORY +
XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3281,7 +3283,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct event_ring_data eq_data = { {0} };
- u16 flags;
if (!CHIP_IS_E1x(bp)) {
/* reset IGU PF statistics: MSIX + ATTN */
@@ -3298,15 +3299,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
BP_FUNC(bp) : BP_VN(bp))*4, 0);
}
- /* function setup flags */
- flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
- /* This flag is relevant for E1x only.
- * E2 doesn't have a TPA configuration in a function level.
- */
- flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
- func_init.func_flgs = flags;
+ func_init.spq_active = true;
func_init.pf_id = BP_FUNC(bp);
func_init.func_id = BP_FUNC(bp);
func_init.spq_map = bp->spq_mapping;
@@ -3707,6 +3700,34 @@ out:
ethver, iscsiver, fcoever);
}
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+ struct timeval epoc;
+ u32 drv_ver;
+ u32 valid_dump;
+
+ if (!SHMEM2_HAS(bp, drv_info))
+ return;
+
+ /* Update Driver load time */
+ do_gettimeofday(&epoc);
+ SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+
+ drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+ SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+ /* Check & notify On-Chip dump. */
+ valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+ if (valid_dump & FIRST_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+ if (valid_dump & SECOND_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
u32 cmd_ok, cmd_fail;
@@ -5274,6 +5295,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+ vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+ break;
case BNX2X_FILTER_MCAST_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
/* This is only relevant for 57710 where multicast MACs are
@@ -5568,6 +5593,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_OPENING_WAIT4_PORT):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
cid = elem->message.data.eth_event.echo &
BNX2X_SWCID_MASK;
DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
@@ -5585,7 +5612,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+ DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
bnx2x_handle_classification_eqe(bp, elem);
break;
@@ -6173,6 +6200,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
break;
case BNX2X_RX_MODE_ALLMULTI:
__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6184,6 +6216,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
break;
case BNX2X_RX_MODE_PROMISC:
/* According to definition of SI mode, iface in promisc mode
@@ -6204,18 +6241,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
else
__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
break;
default:
BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
return -EINVAL;
}
- /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
- if (rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
- }
-
return 0;
}
@@ -7429,6 +7463,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
} else
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ if (SHMEM2_HAS(bp, netproc_fw_ver))
+ SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
return 0;
}
@@ -8406,6 +8443,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
return rc;
}
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+ if (rc == -EEXIST) {
+ /* Do not treat adding the same VLAN as an error. */
+ DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+ rc = 0;
+ } else if (rc < 0) {
+ BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+ }
+
+ return rc;
+}
+
int bnx2x_del_all_macs(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
int mac_type, bool wait_for_comp)
@@ -11678,7 +11751,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
- int vn;
+ int vn, mfw_vn;
u32 val = 0, val2 = 0;
int rc = 0;
@@ -11768,6 +11841,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_mode = 0;
bp->mf_sub_mode = 0;
vn = BP_VN(bp);
+ mfw_vn = BP_FW_MB_IDX(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -11824,6 +11898,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_sub_mode = SUB_MF_MODE_BD;
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp,
+ func_mf_config[func].config);
+
+ if (SHMEM2_HAS(bp, mtu_size)) {
+ int mtu_idx = BP_FW_MB_IDX(bp);
+ u16 mtu_size;
+ u32 mtu;
+
+ mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+ mtu_size = (u16)mtu;
+ DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+ mtu_size, mtu);
+
+ /* if valid: update device mtu */
+ if (((mtu_size + ETH_HLEN) >=
+ ETH_MIN_PACKET_SIZE) &&
+ (mtu_size <=
+ ETH_MAX_JUMBO_PACKET_SIZE))
+ bp->dev->mtu = mtu_size;
+ }
+ break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
bp->mf_mode = MULTI_FUNCTION_SD;
bp->mf_sub_mode = SUB_MF_MODE_UFP;
@@ -11871,9 +11970,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
func, bp->mf_ov, bp->mf_ov);
- } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+ } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+ (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
dev_err(&bp->pdev->dev,
- "Unexpected - no valid MF OV for func %d in UFP mode\n",
+ "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
func);
bp->path_has_ovlan = true;
} else {
@@ -12078,6 +12178,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->drv_info_mutex);
sema_init(&bp->stats_lock, 1);
bp->drv_info_mng_owner = false;
+ INIT_LIST_HEAD(&bp->vlan_reg);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12596,6 +12697,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
return vxlan_features_check(skb, features);
}
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+ int rc;
+
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+ add, &ramrod_flags);
+ } else {
+ rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+ }
+
+ return rc;
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+ int rc = 0;
+
+ if (!bp->vlan_cnt) {
+ DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+ return 0;
+ }
+
+ list_for_each_entry(vlan, &bp->vlan_reg, link) {
+ /* Prepare for cleanup in case of errors */
+ if (rc) {
+ vlan->hw = false;
+ continue;
+ }
+
+ if (!vlan->hw)
+ continue;
+
+ DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+ rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+ if (rc) {
+ BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+ vlan->hw = false;
+ rc = -EINVAL;
+ continue;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+ bool hw = false;
+ int rc = 0;
+
+ if (!netif_running(bp->dev)) {
+ DP(NETIF_MSG_IFUP,
+ "Ignoring VLAN configuration the interface is down\n");
+ return -EFAULT;
+ }
+
+ DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+ vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ bp->vlan_cnt++;
+ if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+ DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+ bp->accept_any_vlan = true;
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ } else if (bp->vlan_cnt <= bp->vlan_credit) {
+ rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+ hw = true;
+ }
+
+ vlan->vid = vid;
+ vlan->hw = hw;
+
+ if (!rc) {
+ list_add(&vlan->link, &bp->vlan_reg);
+ } else {
+ bp->vlan_cnt--;
+ kfree(vlan);
+ }
+
+ DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+ return rc;
+}
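
The add path keeps a software registry of every requested VID and records which of them actually hold a hardware filter slot. The entry type is declared in bnx2x.h rather than in this hunk; judging by the fields used here (link, vid, hw), it presumably looks like:

struct bnx2x_vlan_entry {
	struct list_head link;	/* membership in bp->vlan_reg */
	u16 vid;		/* the 802.1Q VLAN ID */
	bool hw;		/* true while a HW filter slot is held */
};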
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+ int rc = 0;
+
+ if (!netif_running(bp->dev)) {
+ DP(NETIF_MSG_IFUP,
+ "Ignoring VLAN configuration the interface is down\n");
+ return -EFAULT;
+ }
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+ if (!bp->vlan_cnt) {
+ BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ if (vlan->vid == vid)
+ break;
+
+ if (vlan->vid != vid) {
+ BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+ return -EINVAL;
+ }
+
+ if (vlan->hw)
+ rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+ list_del(&vlan->link);
+ kfree(vlan);
+
+ bp->vlan_cnt--;
+
+ if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+ /* Configure all non-configured entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link) {
+ if (vlan->hw)
+ continue;
+
+ rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+ if (rc) {
+ BNX2X_ERR("Unable to config VLAN %d\n",
+ vlan->vid);
+ continue;
+ }
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+ vlan->vid);
+ vlan->hw = true;
+ }
+ DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+ bp->accept_any_vlan = false;
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+ return rc;
+}
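
Taken together, the add/kill handlers implement a credit scheme: VIDs occupy hardware filter slots while the count stays within the per-function credit; on overflow the device falls back to accept-any-VLAN and leaves filtering to the stack, and once enough VIDs are removed the survivors are promoted back into hardware. The governing invariant, written out as an illustrative helper (not in the patch):

static bool bnx2x_vlan_fits_in_hw(struct bnx2x *bp)
{
	/* HW filtering is viable while the registered-VID count does not
	 * exceed the per-function VLAN credit; bp->accept_any_vlan tracks
	 * the opposite state.
	 */
	return bp->vlan_cnt <= bp->vlan_credit;
}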
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12609,6 +12873,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fix_features = bnx2x_fix_features,
.ndo_set_features = bnx2x_set_features,
.ndo_tx_timeout = bnx2x_tx_timeout,
+ .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
@@ -12819,6 +13085,18 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+ /* A VF with an old hypervisor or an old PF does not support filtering */
+ if (IS_PF(bp)) {
+ if (CHIP_IS_E1x(bp))
+ bp->accept_any_vlan = true;
+ else
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+ } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+ }
+
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
@@ -13561,6 +13839,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bnx2x_register_phc(bp);
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
return 0;
init_one_exit:
@@ -13623,6 +13904,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
/* Power on: we can't let PCI layer write to us while we are in D3 */
if (IS_PF(bp)) {
bnx2x_set_power_state(bp, PCI_D0);
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
/* Set endianity registers to reset values in case next driver
* boots in different endianty environment.
@@ -14371,6 +14653,90 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
rc = -EINVAL;
}
+ /* For storage-only interfaces, change driver state */
+ if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+ switch (ctl->drv_state) {
+ case DRV_NOP:
+ break;
+ case DRV_ACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_ACTIVE);
+ break;
+ case DRV_INACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_DISABLED);
+ break;
+ case DRV_UNLOADED:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_NOT_LOADED);
+ break;
+ default:
+ BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+ struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bdn_fc_npiv_tbl *tbl = NULL;
+ u32 offset, entries;
+ int rc = -EINVAL;
+ int i;
+
+ if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+ goto out;
+
+ DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl) {
+ BNX2X_ERR("Failed to allocate fc_npiv table\n");
+ goto out;
+ }
+
+ offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+ DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+ /* Read the table contents from nvram */
+ if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+ BNX2X_ERR("Failed to read FC-NPIV table\n");
+ goto out;
+ }
+
+ /* Since bnx2x_nvram_read() returns data in be32, we need to convert
+ * the number of entries back to cpu endianness.
+ */
+ entries = tbl->fc_npiv_cfg.num_of_npiv;
+ entries = (__force u32)be32_to_cpu((__force __be32)entries);
+ tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+ if (!tbl->fc_npiv_cfg.num_of_npiv) {
+ DP(BNX2X_MSG_MCP,
+ "No FC-NPIV table [valid, simply not present]\n");
+ goto out;
+ } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+ BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ goto out;
+ } else {
+ DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ }
+
+ /* Copy the data into cnic-provided struct */
+ cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+ for (i = 0; i < cnic_tbl->count; i++) {
+ memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+ memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+ }
+
+ rc = 0;
+out:
+ kfree(tbl);
return rc;
}
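
bnx2x_nvram_read() returns raw big-endian words, which is why only num_of_npiv needs an explicit be32_to_cpu(); the WWPN/WWNN bytes are copied through verbatim. The NVRAM table layout this routine expects lives in the shared HSI headers, not in this hunk; inferred from the accesses above, it is roughly:

struct bdn_npiv_settings {
	u8 npiv_wwpn[8];	/* world-wide port name */
	u8 npiv_wwnn[8];	/* world-wide node name */
};

struct bdn_fc_npiv_cfg {
	u32 num_of_npiv;	/* stored big-endian in NVRAM */
};

struct bdn_fc_npiv_tbl {
	struct bdn_fc_npiv_cfg fc_npiv_cfg;
	struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
};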
@@ -14516,6 +14882,7 @@ static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
cp->drv_ctl = bnx2x_drv_ctl;
+ cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index caf1aef651eb..a91ccbf36345 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,8 @@
-/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
*
* Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 49d511092c82..4dead49bd5cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,8 @@
-/* bnx2x_reg.h: Broadcom Everest network driver.
+/* bnx2x_reg.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2137,6 +2139,10 @@
/* [RW 1] When this bit is set; the LLH will expect all packets to be with
e1hov */
#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028
/* [RW 1] When this bit is set; the LLH will classify the packet before
sending it to the BRB or calculating WoL on it. */
#define NIG_REG_LLH_MF_MODE 0x16024
@@ -2953,7 +2959,12 @@
#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
-#define PB_REG_CONTROL 0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0 0x15c06c
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
/* [R 2] Interrupt register #0 read */
@@ -3372,6 +3383,12 @@
#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
/* [R 8] debug only: TSDM current credit. Transaction based. */
#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0 0x401a8
#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
@@ -7240,6 +7257,9 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB 0xffe2
+#define BCM84858_PHY_ID 0x600d
+#define MDIO_AN_REG_848xx_ID_LSB 0xffe3
#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
@@ -7283,31 +7303,31 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
+/* These are mailbox register set used by 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
-#define PHY84833_CMD_GET_EEE_MODE 0x8008
-#define PHY84833_CMD_SET_EEE_MODE 0x8009
-/* Mailbox status set used by 84833. */
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001
+#define PHY848xx_CMD_GET_EEE_MODE 0x8008
+#define PHY848xx_CMD_SET_EEE_MODE 0x8009
+/* Mailbox status set used by 84833 only */
#define PHY84833_STATUS_CMD_RECEIVED 0x0001
#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
@@ -7318,6 +7338,13 @@ Theotherbitsarereservedandshouldbezero*/
#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED 0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb
+
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 4ad415ac8cfe..c9bd7f16018e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,15 +1,17 @@
-/* bnx2x_sp.c: Broadcom Everest network driver.
+/* bnx2x_sp.c: QLogic Everest network driver.
*
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -355,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->get(mp, 1))
+ return false;
+
+ if (!vp->get(vp, 1)) {
+ mp->put(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -383,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->put(mp, 1))
+ return false;
+
+ if (!vp->put(vp, 1)) {
+ mp->get(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
/**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
*
@@ -636,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return -EEXIST;
+
+ return 0;
+}
+
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -670,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return pos;
+
+ return NULL;
+}
+
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -1036,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+ bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+ u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+ u16 inner_mac;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set VLAN and MAC themselves */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ target_obj = elem->cmd_data.vlan_mac.target_obj;
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+ true, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set a VLAN itself */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ }
+
+ /* Set the ramrod data header */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - set/clear a VLAN-MAC pair via the E1H CAM
+ *
+ * @bp: device handle
+ * @o: vlan_mac object the rule belongs to
+ * @elem: execution queue element describing the ADD/DEL command
+ * @rule_idx: index of the rule in the ramrod data buffer
+ * @cam_offset: CAM line to configure
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /* 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+ true : false;
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+ elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+ ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
@@ -1135,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_mac_ramrod_data *data =
+ &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd ==
+ elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -2042,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj =
+ (union bnx2x_qable_obj *)vlan_mac_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, vlans_pool);
+
+ /* CAM pool handling */
+ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+ vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+ /* CAM offset is relevant for 57710 and 57711 chips only which have a
+ * single CAM for both MACs and VLAN-MAC pairs. So the offset
+ * will be taken from MACs' pool object only.
+ */
+ vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1(bp)) {
+ BNX2X_ERR("Do not support chips others than E2\n");
+ BUG();
+ } else if (CHIP_IS_E1H(bp)) {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move_always_err;
+ vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ } else {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move;
+ vlan_mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue,
+ CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ }
+}
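
On E1H the object falls back to the legacy per-CAM-line RAMROD_CMD_ID_ETH_SET_MAC path with a single-slot execution queue, while E2 and later batch up to CLASSIFY_RULES_COUNT PAIR rules per ramrod. A hypothetical call site — the parameter sources here are assumptions, not taken from the patch — might read:

/* Sketch: wiring up a VLAN-MAC object for one queue. The rdata buffer
 * and mapping, the client/connection ids and the two credit pools are
 * assumed to come from the driver's usual slow-path setup.
 */
bnx2x_init_vlan_mac_obj(bp, &vlan_mac_obj,
			cl_id, cid, BP_FUNC(bp),
			rdata, rdata_mapping,
			BNX2X_FILTER_VLAN_MAC_PENDING,
			&bp->sp_state, BNX2X_OBJ_TYPE_RX_TX,
			&bp->macs_pool, &bp->vlans_pool);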
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3854,8 +4101,8 @@ static bool bnx2x_credit_pool_get_entry_always_true(
* If credit is negative pool operations will always succeed (unlimited pool).
*
*/
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
- int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit)
{
/* Zero the object first */
memset(p, 0, sizeof(*p));
@@ -3934,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
/* CAM credit is equaly divided between all active functions
* on the PATH.
*/
- if ((func_num > 0)) {
+ if (func_num > 0) {
if (!CHIP_REV_IS_SLOW(bp))
- cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
else
cam_sz = BNX2X_CAM_SIZE_EMUL;
@@ -3966,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
* on the PATH.
*/
if (func_num > 0) {
- int credit = MAX_VLAN_CREDIT_E2 / func_num;
- bnx2x_init_credit_pool(p, func_id * credit, credit);
+ int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+ bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
} else
/* this should never happen! Block VLAN operations. */
bnx2x_init_credit_pool(p, 0, 0);
@@ -4060,8 +4308,14 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
- if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
- caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
/* RSS keys */
if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
@@ -5669,10 +5923,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
- rdata->tunnel_mode = start_params->tunnel_mode;
- rdata->gre_tunnel_type = start_params->gre_tunnel_type;
- rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
- rdata->vxlan_dst_port = cpu_to_le16(4789);
+
+ rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
+ rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
+ rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
+ rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+ rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
+ rdata->inner_rss = start_params->inner_rss;
+
rdata->sd_accept_mf_clss_fail = start_params->class_fail;
if (start_params->class_fail_ethtype) {
rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
@@ -5690,6 +5948,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
cpu_to_le16(0x8100);
rdata->no_added_tags = start_params->no_added_tags;
+
+ rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+ if (rdata->c2s_pri_tt_valid) {
+ memcpy(rdata->c2s_pri_trans_table.val,
+ start_params->c2s_pri,
+ MAX_VLAN_PRIORITIES);
+ rdata->c2s_pri_default = start_params->c2s_pri_default;
+ }
 /* No need for an explicit memory barrier here as long as we
 * ensure the ordering of writing to the SPQ element
 * and updating of the SPQ producer which involves a memory
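
The start ramrod above replaces the hard-coded VXLAN port (previously cpu_to_le16(4789)) with caller-supplied VXLAN and Geneve destination ports plus per-protocol inner-classification knobs. A minimal user-space sketch of how a caller might fill the corresponding fields; the toy struct mirrors only a few bnx2x_func_start_params members, and the port numbers are just the IANA defaults.

#include <stdint.h>
#include <stdio.h>

struct toy_start_params {
        uint16_t vxlan_dst_port;        /* UDP dest port for VXLAN */
        uint16_t geneve_dst_port;       /* UDP dest port for Geneve */
        uint8_t inner_clss_vxlan;       /* classify RX on inner headers */
        uint8_t inner_rss;              /* hash RSS over inner headers */
};

int main(void)
{
        struct toy_start_params p = { 0 };

        p.vxlan_dst_port = 4789;        /* IANA-assigned VXLAN port */
        p.geneve_dst_port = 6081;       /* IANA-assigned Geneve port */
        p.inner_clss_vxlan = 1;
        p.inner_rss = 1;

        printf("vxlan=%u geneve=%u\n", p.vxlan_dst_port, p.geneve_dst_port);
        return 0;
}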
@@ -5750,15 +6016,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes)) {
rdata->update_tunn_cfg_flg = 1;
- if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ &switch_update_params->changes))
+ rdata->inner_clss_l2gre = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ &switch_update_params->changes))
+ rdata->inner_clss_vxlan = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
&switch_update_params->changes))
- rdata->tunn_clss_en = 1;
- if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+ rdata->inner_clss_l2geneve = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
&switch_update_params->changes))
- rdata->inner_gre_rss_en = 1;
- rdata->tunnel_mode = switch_update_params->tunnel_mode;
- rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
- rdata->vxlan_dst_port = cpu_to_le16(4789);
+ rdata->inner_rss = 1;
+ rdata->vxlan_dst_port =
+ cpu_to_le16(switch_update_params->vxlan_dst_port);
+ rdata->geneve_dst_port =
+ cpu_to_le16(switch_update_params->geneve_dst_port);
}
rdata->echo = SWITCH_UPDATE;
@@ -5885,6 +6158,8 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+ rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 86baecb7c60c..4048fc594cce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,15 +1,17 @@
-/* bnx2x_sp.h: Broadcom Everest network driver.
+/* bnx2x_sp.h: QLogic Everest network driver.
*
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -711,7 +713,10 @@ enum {
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
BNX2X_RSS_IPV6_UDP,
- BNX2X_RSS_GRE_INNER_HDRS,
+
+ BNX2X_RSS_IPV4_VXLAN,
+ BNX2X_RSS_IPV6_VXLAN,
+ BNX2X_RSS_TUNN_INNER_HDRS,
};
struct bnx2x_config_rss_params {
@@ -1105,8 +1110,10 @@ enum {
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
- BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
- BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
};
/* Allowed Function states */
@@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
- /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
- u8 tunnel_mode;
+ /* UDP dest port for VXLAN */
+ u16 vxlan_dst_port;
- /* tunneling classification enablement */
- u8 tunn_clss_en;
+ /* UDP dest port for Geneve */
+ u16 geneve_dst_port;
- /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
- u8 gre_tunnel_type;
+ /* Enable inner Rx classifications for L2GRE packets */
+ u8 inner_clss_l2gre;
- /* Enables Inner GRE RSS on the function, depends on the client RSS
- * capailities
- */
- u8 inner_gre_rss_en;
+ /* Enable inner Rx classifications for L2-Geneve packets */
+ u8 inner_clss_l2geneve;
+
+ /* Enable inner Rx classification for vxlan packets */
+ u8 inner_clss_vxlan;
+
+ /* Enable RSS according to inner header */
+ u8 inner_rss;
/* Allows accepting of packets failing MF classification, possibly
* only matching a given ethertype
@@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params {
/* Prevent inner vlans from being added by FW */
u8 no_added_tags;
+
+ /* Inner-to-Outer vlan priority mapping */
+ u8 c2s_pri[MAX_VLAN_PRIORITIES];
+ u8 c2s_pri_default;
+ u8 c2s_pri_valid;
};
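
A hedged sketch of how the new c2s (inner-to-outer) priority table might be populated. MAX_VLAN_PRIORITIES is assumed to be 8 here (one slot per 3-bit PCP value), and the helper name is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_VLAN_PRIORITIES 8       /* assumed value of MAX_VLAN_PRIORITIES */

static void toy_fill_c2s_pri(uint8_t c2s_pri[TOY_MAX_VLAN_PRIORITIES],
                             uint8_t *c2s_pri_default, uint8_t *c2s_pri_valid)
{
        int i;

        for (i = 0; i < TOY_MAX_VLAN_PRIORITIES; i++)
                c2s_pri[i] = i;         /* identity mapping by default */
        c2s_pri[1] = 0;                 /* example: inner PCP 1 -> outer PCP 0 */
        *c2s_pri_default = 0;           /* priority used when no inner tag */
        *c2s_pri_valid = 1;             /* tell FW the table is populated */
}

int main(void)
{
        uint8_t tbl[TOY_MAX_VLAN_PRIORITIES], def, valid;
        int i;

        toy_fill_c2s_pri(tbl, &def, &valid);
        for (i = 0; i < TOY_MAX_VLAN_PRIORITIES; i++)
                printf("inner %d -> outer %u\n", i, tbl[i]);
        return 0;
}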
struct bnx2x_func_switch_update_params {
@@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params {
u16 vlan;
u16 vlan_eth_type;
u8 vlan_force_prio;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
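
With the switch-update parameters reworked above, a tunnel reconfiguration request sets the relevant change bits and passes both UDP ports directly; the real driver sets bits in the changes bitmap and issues a SWITCH_UPDATE function command. A stand-alone model of that flow, with names redeclared so the example compiles on its own.

#include <stdint.h>
#include <stdio.h>

enum {
        TOY_UPDATE_TUNNEL_CFG_CHNG,
        TOY_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
        TOY_UPDATE_TUNNEL_INNER_RSS,
};

struct toy_switch_update_params {
        unsigned long changes;          /* models the 'changes' bitmap */
        uint16_t vxlan_dst_port;
        uint16_t geneve_dst_port;
};

int main(void)
{
        struct toy_switch_update_params p = { 0 };

        p.changes |= 1UL << TOY_UPDATE_TUNNEL_CFG_CHNG;
        p.changes |= 1UL << TOY_UPDATE_TUNNEL_INNER_CLSS_VXLAN;
        p.changes |= 1UL << TOY_UPDATE_TUNNEL_INNER_RSS;
        p.vxlan_dst_port = 4789;        /* ports travel with the request now */
        p.geneve_dst_port = 6081;

        printf("changes=0x%lx vxlan=%u geneve=%u\n",
               p.changes, p.vxlan_dst_port, p.geneve_dst_port);
        return 0;
}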
struct bnx2x_func_afex_update_params {
@@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params {
u8 dcb_enabled;
u8 dcb_version;
u8 dont_add_pri_0_en;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
};
struct bnx2x_func_set_timesync_params {
@@ -1396,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1466,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *p, u8 func_id,
u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit);
/****************** RSS CONFIGURATION ****************/
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1493,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
+#define PF_MAC_CREDIT_E2(bp, func_num) \
+ ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num) \
+ ((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
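
Worked example of the arithmetic these macros encode, under assumed constants (MAX_MAC_CREDIT_E2 = 272 CAM entries, 64 possible VFs per path, one MAC credit per VF, two PFs sharing the path, 16 VFs under this PF); the values are illustrative only.

#include <stdio.h>

int main(void)
{
        int max_credit = 272;           /* assumed MAX_MAC_CREDIT_E2 */
        int vfs_per_path = 64;          /* GET_NUM_VFS_PER_PATH() */
        int vf_cnt = 1;                 /* VF_MAC_CREDIT_CNT */
        int func_num = 2;               /* PFs sharing the path */
        int vfs_per_pf = 16;            /* GET_NUM_VFS_PER_PF() */
        int pf_credit;

        /* reserve credit for every possible VF on the path, split the
         * remainder between the PFs, then add back the share of this
         * PF's own VFs
         */
        pf_credit = (max_credit - vfs_per_path * vf_cnt) / func_num +
                    vfs_per_pf * vf_cnt;

        printf("PF MAC credit: %d\n", pf_credit);       /* (272-64)/2+16 = 120 */
        return 0;
}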
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f67348d16966..9d027348cd09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.c: Broadcom Everest network driver.
+/* bnx2x_sriov.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -195,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
setup_p->gen_params.fp_hsi = vf->fp_hsi;
- /* Setup-op pause params:
- * Nothing to do, the pause thresholds are set by default to 0 which
- * effectively turns off the feature for this queue. We don't want
- * one queue (VF) to interfering with another queue (another VF)
- */
- if (vf->cfg_flags & VF_CFG_FW_FC)
- BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
- vf->abs_vfid);
/* Setup-op flags:
* collect statistics, zero statistics, local-switching, security,
* OV for Flex10, RSS and MCAST for leading
@@ -358,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
}
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
- int qid, bool drv_only, bool mac)
+ int qid, bool drv_only, int type)
{
struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
- mac ? "MACs" : "VLANs");
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
- if (mac) {
+ if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ } else if (type == BNX2X_VF_FILTER_MAC) {
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
} else {
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
}
ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -391,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
&ramrod.ramrod_flags);
if (rc) {
BNX2X_ERR("Failed to delete all %s\n",
- mac ? "MACs" : "VLANs");
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
return rc;
}
- /* Clear the vlan counters */
- if (!mac)
- atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
return 0;
}
@@ -412,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
vf->abs_vfid, filter->add ? "Adding" : "Deleting",
- filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
- if (filter->type == BNX2X_VF_FILTER_VLAN) {
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod.user_req.vlan_mac_flags);
+ if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
ramrod.user_req.u.vlan.vlan = filter->vid;
} else {
@@ -429,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
BNX2X_VLAN_MAC_DEL;
- /* Verify there are available vlan credits */
- if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
- (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
- vf_vlan_rules_cnt(vf))) {
- BNX2X_ERR("No credits for vlan [%d >= %d]\n",
- atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
- vf_vlan_rules_cnt(vf));
- return -ENOMEM;
- }
-
set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -450,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
if (rc && rc != -EEXIST) {
BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete",
- filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
- "VLAN");
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+ "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ?
+ "MAC" : "VLAN");
return rc;
}
- /* Update the vlan counters */
- if (filter->type == BNX2X_VF_FILTER_VLAN)
- bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
- &bnx2x_vfq(vf, qid, vlan_count));
-
return 0;
}
@@ -511,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
if (rc)
goto op_err;
- /* Configure vlan0 for leading queue */
- if (!qid) {
- struct bnx2x_vf_mac_vlan_filter filter;
-
- memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
- filter.type = BNX2X_VF_FILTER_VLAN;
- filter.add = true;
- filter.vid = 0;
- rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
- if (rc)
- goto op_err;
- }
-
/* Schedule the configuration of any pending vlan filters */
- vf->cfg_flags |= VF_CFG_VLAN;
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_MSG_IOV);
return 0;
@@ -544,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* If needed, clean the filtering data base */
if ((qid == LEADING_IDX) &&
bnx2x_validate_vf_sp_objs(bp, vf, false)) {
- rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN);
if (rc)
goto op_err;
- rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_MAC);
if (rc)
goto op_err;
}
@@ -680,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
/* Remove filtering if feasible */
if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
- false, false);
+ false,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false,
+ BNX2X_VF_FILTER_VLAN);
if (rc)
goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
- false, true);
+ false,
+ BNX2X_VF_FILTER_MAC);
if (rc)
goto op_err;
rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -765,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
- if (vf->cfg_flags & VF_CFG_INT_SIMD)
- val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -845,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
return 0;
}
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- int new)
-{
- int num = vf_vlan_rules_cnt(vf);
- int diff = new - num;
- bool rc = true;
-
- DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
- vf->abs_vfid, new, num);
-
- if (diff > 0)
- rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
- else if (diff < 0)
- rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
- if (rc)
- vf_vlan_rules_cnt(vf) = new;
- else
- DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
- vf->abs_vfid);
-}
-
/* must be called after the number of PF queues and the number of VFs are
* both known
*/
@@ -875,21 +833,13 @@ static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct vf_pf_resc_request *resc = &vf->alloc_resc;
- u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculations for macs (just yet) */
- resc->num_mac_filters = 1;
-
- /* divvy up vlan rules */
- bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
- vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
- vlan_count = 1 << ilog2(vlan_count);
- bnx2x_iov_re_set_vlan_filters(bp, vf,
- vlan_count / BNX2X_NR_VIRTFN(bp));
+ resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+ resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
/* no real limitation */
resc->num_mc_filters = 0;
@@ -1338,6 +1288,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
mutex_init(&bp->vfdb->bulletin_mutex);
+ if (SHMEM2_HAS(bp, sriov_switch_mode))
+ SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
return 0;
failed:
DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1620,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
vf->filter_state = 0;
vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+ bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+ vf_vlan_rules_cnt(vf));
+ bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+ vf_mac_rules_cnt(vf));
+
/* init mcast object - This object will be re-initialized
* during VF-ACQUIRE with the proper cl_id and cid.
* It needs to be initialized here so that it can be safely
@@ -2032,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
- /* Save a vlan filter for the Hypervisor */
return ((req_resc->num_rxqs <= rxq_cnt) &&
(req_resc->num_txqs <= txq_cnt) &&
(req_resc->num_sbs <= vf_sb_count(vf)) &&
(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
- (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+ (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
/* CORE VF API */
@@ -2091,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_sb_count(vf) = resc->num_sbs;
vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
- if (resc->num_mac_filters)
- vf_mac_rules_cnt(vf) = resc->num_mac_filters;
- /* Add an additional vlan filter credit for the hypervisor */
- bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
DP(BNX2X_MSG_IOV,
"Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
vf_sb_count(vf), vf_rxq_count(vf),
vf_txq_count(vf), vf_mac_rules_cnt(vf),
- vf_vlan_rules_visible_cnt(vf));
+ vf_vlan_rules_cnt(vf));
/* Initialize the queues */
if (!vf->vfqs) {
@@ -2133,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
struct bnx2x_func_init_params func_init = {0};
- u16 flags = 0;
int i;
/* the sb resources are initialized at this point, do the
@@ -2160,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
/* reset IGU VF statistics: MSIX */
REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
- /* vf init */
- if (vf->cfg_flags & VF_CFG_STATS)
- flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
- if (vf->cfg_flags & VF_CFG_TPA)
- flags |= FUNC_FLG_TPA;
-
- if (is_vf_multi(vf))
- flags |= FUNC_FLG_RSS;
-
/* function setup */
- func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp);
func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
- func_init.fw_stat_map = vf->fw_stat_map;
- func_init.spq_map = vf->spq_map;
- func_init.spq_prod = 0;
bnx2x_func_init(bp, &func_init);
/* Enable the vf */
@@ -2589,8 +2527,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
for_each_vf(bp, vfidx) {
- bulletin = BP_VF_BULLETIN(bp, vfidx);
- if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
}
}
@@ -2808,20 +2746,58 @@ out:
return rc;
}
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, bool accept)
+{
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+ unsigned long accept_flags;
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (accept)
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u16 vlan, bool add)
{
- struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_update_params *update_params;
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+ : BNX2X_VLAN_MAC_DEL;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
struct pf_vf_bulletin_content *bulletin = NULL;
- struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_mac_obj *vlan_obj;
unsigned long vlan_mac_flags = 0;
unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL;
- unsigned long accept_flags;
- int rc;
+ int i, rc;
if (vlan > 4095) {
BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2850,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc)
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
mutex_unlock(&bp->vfdb->bulletin_mutex);
/* is vf initialized and queue set up? */
@@ -2876,84 +2856,76 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
goto out;
}
- /* need to remove/add the VF's accept_any_vlan bit */
- accept_flags = bnx2x_leading_vfq(vf, accept_flags);
- if (vlan)
- clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
- else
- set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
- bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
- accept_flags);
- bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
- bnx2x_config_rx_mode(bp, &rx_ramrod);
+ /* clear accept_any_vlan when the HV forces a vlan; otherwise
+ * set it according to the VF's capabilities
+ */
+ if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+ bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
- /* configure the new vlan to device */
- memset(&ramrod_param, 0, sizeof(ramrod_param));
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod_param.user_req.vlan_mac_flags);
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- rc = -EINVAL;
+ rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+ if (rc)
goto out;
- }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrods to configure default vlan and
+ * silent vlan removal
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- &update_params->update_flags);
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ for_each_vfq(vf, i) {
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+ /* validate the Q is UP */
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ continue;
+
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
&update_params->update_flags);
- update_params->def_vlan = vlan;
- update_params->silent_removal_value =
- vlan & VLAN_VID_MASK;
- update_params->silent_removal_mask = VLAN_VID_MASK;
- }
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- goto out;
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+ i);
+ goto out;
+ }
}
-
-
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured the Vlan before vf was
- * up and we were called because the VF came up later
- */
out:
- vf->cfg_flags &= ~VF_CFG_VLAN;
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ if (!rc)
+ DP(BNX2X_MSG_IOV,
+ "updated VF[%d] vlan configuration (vlan = %d)\n",
+ vfidx, vlan);
+
return rc;
}
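
For context, this ndo is typically reached from user space through iproute2, e.g. "ip link set eth0 vf 0 vlan 100" to force a VLAN on VF 0 (and "... vlan 0" to remove it); note that with this change the default-vlan/silent-removal queue update fans out to every active VF queue rather than only the leading one.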
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 66ee62a0401a..670a581ffabc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.h: Broadcom Everest network driver.
+/* bnx2x_sriov.h: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -75,7 +77,10 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
- atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+
+ /* VLAN-MACs object */
+ struct bnx2x_vlan_mac_obj vlan_mac_obj;
+
unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
@@ -103,8 +108,10 @@ struct bnx2x_virtf;
struct bnx2x_vf_mac_vlan_filter {
int type;
-#define BNX2X_VF_FILTER_MAC 1
-#define BNX2X_VF_FILTER_VLAN 2
+#define BNX2X_VF_FILTER_MAC BIT(0)
+#define BNX2X_VF_FILTER_VLAN BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+ (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /* shortcut */
bool add;
u8 *mac;
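
Since the filter type is now a bitmask, a combined VLAN-MAC filter is simply both bits set, and checks must test masks rather than compare for equality. A small self-contained sketch of the dispatch this enables, with the constants redeclared locally for the example.

#include <stdio.h>

#define TOY_FILTER_MAC          (1 << 0)
#define TOY_FILTER_VLAN         (1 << 1)
#define TOY_FILTER_VLAN_MAC     (TOY_FILTER_MAC | TOY_FILTER_VLAN)

static const char *toy_filter_name(int type)
{
        if ((type & TOY_FILTER_VLAN_MAC) == TOY_FILTER_VLAN_MAC)
                return "VLAN-MAC";      /* test the combined mask first */
        if (type & TOY_FILTER_MAC)
                return "MAC";
        return "VLAN";
}

int main(void)
{
        printf("%s\n", toy_filter_name(TOY_FILTER_MAC | TOY_FILTER_VLAN));
        printf("%s\n", toy_filter_name(TOY_FILTER_MAC));
        printf("%s\n", toy_filter_name(TOY_FILTER_VLAN));
        return 0;
}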
@@ -119,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters {
/* vf context */
struct bnx2x_virtf {
u16 cfg_flags;
-#define VF_CFG_STATS 0x0001
-#define VF_CFG_FW_FC 0x0002
-#define VF_CFG_TPA 0x0004
-#define VF_CFG_INT_SIMD 0x0008
-#define VF_CACHE_LINE 0x0010
-#define VF_CFG_VLAN 0x0020
-#define VF_CFG_STATS_COALESCE 0x0040
-#define VF_CFG_EXT_BULLETIN 0x0080
+#define VF_CFG_STATS_COALESCE 0x1
+#define VF_CFG_EXT_BULLETIN 0x2
+#define VF_CFG_VLAN_FILTER 0x4
u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO
* IFLA_VF_LINK_STATE_ENABLE
* IFLA_VF_LINK_STATE_DISABLE
@@ -140,9 +142,8 @@ struct bnx2x_virtf {
bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
- dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ dma_addr_t fw_stat_map;
u16 stats_stride;
- dma_addr_t spq_map;
dma_addr_t bulletin_map;
/* Allocated resources counters. Before the VF is acquired, the
@@ -163,8 +164,6 @@ struct bnx2x_virtf {
#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
- /* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
u8 sb_count; /* actual number of SBs */
u8 igu_base_id; /* base igu status block id */
@@ -207,6 +206,9 @@ struct bnx2x_virtf {
enum channel_tlvs op_current;
u8 fp_hsi;
+
+ struct bnx2x_credit_pool_obj vf_vlans_pool;
+ struct bnx2x_credit_pool_obj vf_macs_pool;
};
#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
@@ -230,6 +232,12 @@ struct bnx2x_virtf {
#define FW_VF_HANDLE(abs_vfid) \
(abs_vfid + FW_PF_MAX_HANDLE)
+#define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \
+ : 0)
+#define VF_MAC_CREDIT_CNT 1
+#define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */
+
/* locking and unlocking the channel mutex */
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv);
@@ -274,6 +282,10 @@ struct bnx2x_vf_sp {
} vlan_rdata;
union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_mac_rdata;
+
+ union {
struct eth_filter_rules_ramrod_data e2;
} rx_mode_rdata;
@@ -536,8 +548,14 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
#else /* CONFIG_BNX2X_SRIOV */
+#define GET_NUM_VFS_PER_PATH(bp) 0
+#define GET_NUM_VFS_PER_PF(bp) 0
+#define VF_MAC_CREDIT_CNT 0
+#define VF_VLAN_CREDIT_CNT 0
+
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -604,5 +622,7 @@ struct pf_vf_bulletin_content;
static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
bool support_long) {}
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) { return 0; }
+
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 69d699f0730a..7e0919aa450e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,8 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
+/* bnx2x_stats.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 965539a9dabe..b2644ed13d06 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,8 @@
-/* bnx2x_stats.h: Broadcom Everest network driver.
+/* bnx2x_stats.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 06b8c0d8fd3b..1374e5394a79 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1,15 +1,17 @@
-/* bnx2x_vfpf.c: Broadcom Everest network driver.
+/* bnx2x_vfpf.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -245,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
req->resc_request.num_sbs = bp->igu_sb_cnt;
req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+ req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
/* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -255,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* Bulletin support for bulletin board with length > legacy length */
req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+ /* vlan filtering is supported */
+ req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
/* add list termination tlv */
bnx2x_add_tlv(bp, req,
@@ -373,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
@@ -546,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_MAC_PENDING,
&vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
+ &vf->vf_macs_pool);
/* vlan */
bnx2x_init_vlan_obj(bp, &q->vlan_obj,
cl_id, q->cid, func_id,
@@ -555,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_VLAN_PENDING,
&vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
+ &vf->vf_vlans_pool);
+ /* vlan-mac */
+ bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &vf->vf_macs_pool,
+ &vf->vf_vlans_pool);
/* mcast */
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
q->cid, func_id, func_id,
@@ -723,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
if (set)
- req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
/* sample bulletin board for new mac */
bnx2x_sample_bulletin(bp);
@@ -911,6 +927,67 @@ out:
return 0;
}
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc = 0;
+
+ if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+ DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+ return 0;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = vf_qid;
+ req->n_mac_vlan_filters = 1;
+
+ req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+ if (add)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+ /* sample bulletin board for hypervisor vlan */
+ bnx2x_sample_bulletin(bp);
+
+ if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+ BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ req->filters[0].vlan_tag = vid;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+ vid);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
int mode = bp->rx_mode;
@@ -934,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ if (mode == BNX2X_RX_MODE_PROMISC)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
}
+ if (bp->accept_any_vlan)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
req->vf_qid = 0;
@@ -1188,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
PFVF_CAP_TPA |
- PFVF_CAP_TPA_UPDATE);
+ PFVF_CAP_TPA_UPDATE |
+ PFVF_CAP_VLAN_FILTER);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -1203,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_max_queue_cnt(bp, vf);
resc->num_sbs = vf_sb_count(vf);
resc->num_mac_filters = vf_mac_rules_cnt(vf);
- resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
resc->num_mc_filters = 0;
if (status == PFVF_STATUS_SUCCESS) {
@@ -1370,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
}
+ if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+ DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+ vf->abs_vfid);
+ vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+ } else {
+ vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+ }
+
out:
/* response */
bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1382,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
int rc;
/* record ghost addresses from vf message */
- vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride;
rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1578,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
if ((msg_filter->flags & type_flag) != type_flag)
continue;
- if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+ memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+ if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
- fl->filters[j].type = BNX2X_VF_FILTER_MAC;
- } else {
+ fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
+ }
+ if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
fl->filters[j].vid = msg_filter->vlan_tag;
- fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+ fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
}
- fl->filters[j].add =
- (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
- true : false;
+ fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
fl->count++;
+ j++;
}
if (!fl->count)
kfree(fl);
@@ -1598,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
return 0;
}
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+ u32 flags)
+{
+ int i, cnt = 0;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ if ((filters->filters[i].flags & flags) == flags)
+ cnt++;
+
+ return cnt;
+}
+
static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
struct vfpf_q_mac_vlan_filter *filter)
{
@@ -1629,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
@@ -1639,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* check for any mac/vlan changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build mac list */
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+ /* build vlan-mac list */
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_MAC_FILTER);
+ VFPF_VLAN_MAC_FILTER);
if (rc)
goto op_err;
if (fl) {
- /* set mac list */
+ /* set vlan-mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
@@ -1657,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
}
- /* build vlan list */
+ /* build mac list */
fl = NULL;
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_VLAN_FILTER);
+ VFPF_MAC_FILTER);
if (rc)
goto op_err;
if (fl) {
- /* set vlan list */
+ /* set mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
if (rc)
goto op_err;
}
+
}
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1687,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
}
- /* A packet arriving the vf's mac should be accepted
- * with any vlan, unless a vlan has already been
- * configured.
+ /* any_vlan is not configured if HV is forcing VLAN
+ * any_vlan is configured if
+ * 1. VF does not support vlan filtering
+ * OR
+ * 2. VF supports vlan filtering and explicitly requested it
*/
- if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+ (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+ msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
@@ -1727,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
* since queue was not set up.
*/
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
- /* once a mac was set by ndo can only accept a single mac... */
- if (filters->n_mac_vlan_filters > 1) {
- BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
- vf->abs_vfid);
- rc = -EPERM;
- goto response;
+ struct vfpf_q_mac_vlan_filter *filter = NULL;
+ int i;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (!(filters->filters[i].flags &
+ VFPF_Q_FILTER_DEST_MAC_VALID))
+ continue;
+
+ /* once a mac was set by ndo can only accept
+ * a single mac...
+ */
+ if (filter) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+ vf->abs_vfid,
+ filters->n_mac_vlan_filters);
+ rc = -EPERM;
+ goto response;
+ }
+
+ filter = &filters->filters[i];
}
/* ...and only the mac set by the ndo */
- if (filters->n_mac_vlan_filters == 1 &&
- !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+ if (filter &&
+ !ether_addr_equal(filter->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
@@ -1759,17 +1882,14 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
/* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
- int i;
-
/* search for vlan filters */
- for (i = 0; i < filters->n_mac_vlan_filters; i++) {
- if (filters->filters[i].flags &
- VFPF_Q_FILTER_VLAN_TAG_VALID) {
- BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
- vf->abs_vfid);
- rc = -EPERM;
- goto response;
- }
+
+ if (bnx2x_vf_filters_contain(filters,
+ VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ rc = -EPERM;
+ goto response;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index b86479fc0d2f..64f2b52c5829 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -1,16 +1,22 @@
-/* bnx2x_vfpf.h: Broadcom Everest network driver.
+/* bnx2x_vfpf.h: QLogic Everest network driver.
*
* Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * under the terms of the GNU General Public License version 2 (the “GPL”),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
*
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is
+ * not derived from this software. The special exception does not apply to any
+ * modifications of the software.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Ariel Elior <ariel.elior@qlogic.com>
@@ -64,6 +70,8 @@ struct hw_sb_info {
#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020
+
#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
#define BULLETIN_CONTENT_LEGACY_SIZE (32)
#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */
@@ -127,6 +135,7 @@ struct vfpf_acquire_tlv {
u8 fp_hsi_ver;
u8 caps;
#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER (1 << 1)
} vfdev_info;
struct vf_pf_resc_request resc_request;
@@ -168,10 +177,12 @@ struct pfvf_acquire_resp_tlv {
struct pf_vf_pfdev_info {
u32 chip_num;
u32 pf_cap;
-#define PFVF_CAP_RSS 0x00000001
-#define PFVF_CAP_DHC 0x00000002
-#define PFVF_CAP_TPA 0x00000004
-#define PFVF_CAP_TPA_UPDATE 0x00000008
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
+#define PFVF_CAP_VLAN_FILTER 0x00000010
+
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -288,7 +299,7 @@ struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
-#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+#define VFPF_Q_FILTER_SET 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 17c145fdf3ff..b69dc58faeab 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -192,6 +192,7 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_CTX_WR_CMD;
io->cid_addr = cid_addr;
io->offset = off;
@@ -206,6 +207,7 @@ static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_CTXTBL_WR_CMD;
io->offset = off;
io->dma_addr = addr;
@@ -219,6 +221,7 @@ static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
struct drv_ctl_info info;
struct drv_ctl_l2_ring *ring = &info.data.ring;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
if (start)
info.cmd = DRV_CTL_START_L2_CMD;
else
@@ -236,6 +239,7 @@ static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_IO_WR_CMD;
io->offset = off;
io->data = val;
@@ -249,13 +253,14 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_IO_RD_CMD;
io->offset = off;
ethdev->drv_ctl(dev->netdev, &info);
return io->data;
}
-static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
@@ -263,6 +268,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct fcoe_capabilities *fcoe_cap =
&info.data.register_data.fcoe_features;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
@@ -272,6 +278,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
}
info.data.ulp_type = ulp_type;
+ info.drv_state = state;
ethdev->drv_ctl(dev->netdev, &info);
}
@@ -286,6 +293,7 @@ static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = cmd;
info.data.credit.credit_count = count;
ethdev->drv_ctl(dev->netdev, &info);
@@ -591,7 +599,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
- cnic_ulp_ctl(dev, ulp_type, true);
+ cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
return 0;
@@ -636,7 +644,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
- cnic_ulp_ctl(dev, ulp_type, false);
+ if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+ cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
+ else
+ cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
return 0;
}
@@ -4267,6 +4278,7 @@ static void cnic_delete_task(struct work_struct *work)
cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
cp->ethdev->drv_ctl(dev->netdev, &info);
}
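
All of the memset() additions in this file follow one pattern: drv_ctl_info is an on-stack variable that has grown a drv_state field (see the cnic_if.h hunk below), so each sender now zeroes the whole struct before filling only the members it uses. A minimal user-space model of the pattern:

#include <stdio.h>
#include <string.h>

struct toy_ctl_info {
        int cmd;
        int drv_state;                  /* models the newly added field */
};

static void toy_drv_ctl(struct toy_ctl_info *info)
{
        printf("cmd=%d drv_state=%d\n", info->cmd, info->drv_state);
}

int main(void)
{
        struct toy_ctl_info info;

        memset(&info, 0, sizeof(info)); /* drv_state reads as 0 (DRV_NOP) */
        info.cmd = 42;                  /* fill only what this sender uses */
        toy_drv_ctl(&info);
        return 0;
}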
@@ -5433,6 +5445,23 @@ static void cnic_free_dev(struct cnic_dev *dev)
kfree(dev);
}
+static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
+ struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ int ret;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2x is down */
+
+ if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+ return -EINVAL;
+
+ ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
+ return ret;
+}
+
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
struct pci_dev *pdev)
{
@@ -5451,6 +5480,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev->register_device = cnic_register_device;
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+ cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
cp = cdev->cnic_priv;
cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ef6125b0ee3e..789e5c7e9311 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -15,8 +15,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.21"
-#define CNIC_MODULE_RELDATE "January 29, 2015"
+#define CNIC_MODULE_VERSION "2.5.22"
+#define CNIC_MODULE_RELDATE "July 20, 2015"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -151,6 +151,11 @@ struct drv_ctl_register_data {
struct drv_ctl_info {
int cmd;
+ int drv_state;
+#define DRV_NOP 0
+#define DRV_ACTIVE 1
+#define DRV_INACTIVE 2
+#define DRV_UNLOADED 3
union {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
@@ -161,6 +166,15 @@ struct drv_ctl_info {
} data;
};
+#define MAX_NPIV_ENTRIES 64
+#define FC_NPIV_WWN_SIZE 8
+
+struct cnic_fc_npiv_tbl {
+ u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+ u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+ u32 count;
+};
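
A hypothetical ULP-side consumer of the new hook, assuming only the definitions above (struct cnic_fc_npiv_tbl, MAX_NPIV_ENTRIES, and the get_fc_npiv_tbl method added to cnic_dev); the walker and its log format are invented for the sketch.

static void toy_walk_npiv(struct cnic_dev *dev)
{
        /* static: the table is ~1KB, too large for the kernel stack */
        static struct cnic_fc_npiv_tbl tbl;
        u32 i;

        if (dev->get_fc_npiv_tbl(dev, &tbl))
                return;                 /* device down or pre-E2 chip */

        for (i = 0; i < tbl.count && i < MAX_NPIV_ENTRIES; i++)
                pr_info("npiv[%u]: wwpn[0]=0x%02x wwnn[0]=0x%02x\n",
                        i, tbl.wwpn[i][0], tbl.wwnn[i][0]);
}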
+
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
@@ -226,6 +240,8 @@ struct cnic_eth_dev {
int (*drv_submit_kwqes_16)(struct net_device *,
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+ int (*drv_get_fc_npiv_tbl)(struct net_device *,
+ struct cnic_fc_npiv_tbl *);
unsigned long reserved1[2];
union drv_info_to_mcp *addr_drv_info_to_mcp;
};
@@ -314,6 +330,7 @@ struct cnic_dev {
struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
+ int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *);
unsigned long flags;
#define CNIC_F_CNIC_UP 1
#define CNIC_F_BNX2_CLASS 3
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 64c1e9db6b0b..eb080ef8ee97 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -907,9 +907,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-
if (mode == GENET_POWER_PASSIVE)
- bcmgenet_mii_reset(priv->dev);
+ bcmgenet_phy_power_set(priv->dev, true);
}
/* ioctl handle special commands that are not present in ethtool. */
@@ -1725,7 +1724,7 @@ static int init_umac(struct bcmgenet_priv *priv)
int0_enable |= UMAC_IRQ_TXDMA_DONE;
/* Monitor cable plug/unplugged event for internal PHY */
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
@@ -2389,6 +2388,23 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Invoke the main RX/TX interrupt handler */
+ disable_irq(priv->irq0);
+ bcmgenet_isr0(priv->irq0, priv);
+ enable_irq(priv->irq0);
+
+ /* And the interrupt handler for RX/TX priority queues */
+ disable_irq(priv->irq1);
+ bcmgenet_isr1(priv->irq1, priv);
+ enable_irq(priv->irq1);
+}
+#endif
+
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -2626,13 +2642,12 @@ static int bcmgenet_open(struct net_device *dev)
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
/* Turn on the clock */
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
/* take MAC out of reset */
@@ -2651,7 +2666,7 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
@@ -2687,23 +2702,24 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
- /* Re-configure the port multiplexer towards the PHY device */
- bcmgenet_mii_config(priv->dev, false);
-
- phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
- priv->phy_interface);
+ ret = bcmgenet_mii_probe(dev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to PHY\n");
+ goto err_irq1;
+ }
bcmgenet_netif_start(dev);
return 0;
+err_irq1:
+ free_irq(priv->irq1, priv);
err_irq0:
- free_irq(priv->irq0, dev);
+ free_irq(priv->irq0, priv);
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2757,11 +2773,10 @@ static int bcmgenet_close(struct net_device *dev)
free_irq(priv->irq0, priv);
free_irq(priv->irq1, priv);
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2941,6 +2956,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_do_ioctl = bcmgenet_ioctl,
.ndo_set_features = bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bcmgenet_poll_controller,
+#endif
};
/* Array of GENET hardware parameters/characteristics */
@@ -3214,11 +3232,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->version = pd->genet_version;
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
- if (IS_ERR(priv->clk))
+ if (IS_ERR(priv->clk)) {
dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+ priv->clk = NULL;
+ }
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
bcmgenet_set_hw_params(priv);
@@ -3229,8 +3248,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
- if (IS_ERR(priv->clk_wol))
+ if (IS_ERR(priv->clk_wol)) {
dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+ priv->clk_wol = NULL;
+ }
priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
@@ -3256,8 +3277,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_carrier_off(dev);
/* Turn off the main clock, WOL clock is handled separately */
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
if (err)
@@ -3266,8 +3286,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
return err;
err_clk_disable:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
err:
free_netdev(dev);
return err;
@@ -3319,7 +3338,7 @@ static int bcmgenet_suspend(struct device *d)
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
clk_prepare_enable(priv->clk_wol);
- } else if (phy_is_internal(priv->phydev)) {
+ } else if (priv->internal_phy) {
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
}
@@ -3348,7 +3367,7 @@ static int bcmgenet_resume(struct device *d)
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
bcmgenet_umac_reset(priv);
@@ -3363,14 +3382,14 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev, false);
+ bcmgenet_mii_config(priv->dev);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 6159deab8c98..7299d1075422 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -593,6 +593,7 @@ struct bcmgenet_priv {
/* MDIO bus variables */
wait_queue_head_t wq;
struct phy_device *phydev;
+ bool internal_phy;
struct device_node *phy_dn;
struct device_node *mdio_dn;
struct mii_bus *mii_bus;
@@ -670,9 +671,9 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
-void bcmgenet_mii_reset(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index adf23d2ac488..b3679ad1c1c7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -163,14 +163,13 @@ void bcmgenet_mii_setup(struct net_device *dev)
phy_print_status(phydev);
}
-void bcmgenet_mii_reset(struct net_device *dev)
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+ struct fixed_phy_status *status)
{
- struct bcmgenet_priv *priv = netdev_priv(dev);
+ if (dev && dev->phydev && status)
+ status->link = dev->phydev->link;
- if (priv->phydev) {
- phy_init_hw(priv->phydev);
- phy_start_aneg(priv->phydev);
- }
+ return 0;
}
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
@@ -215,7 +214,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_PWR_DN_EN_LD;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- bcmgenet_mii_reset(dev);
}
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
@@ -226,9 +224,13 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
reg |= LED_ACT_SOURCE_MAC;
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+ if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ fixed_phy_set_link_update(priv->phydev,
+ bcmgenet_fixed_phy_link_update);
}
-int bcmgenet_mii_config(struct net_device *dev, bool init)
+int bcmgenet_mii_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -238,10 +240,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
u32 port_ctrl;
u32 reg;
- priv->ext_phy = !phy_is_internal(priv->phydev) &&
+ priv->ext_phy = !priv->internal_phy &&
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
priv->phy_interface = PHY_INTERFACE_MODE_NA;
switch (priv->phy_interface) {
@@ -259,7 +261,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
phy_name = "internal PHY";
bcmgenet_internal_phy_setup(dev);
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -321,13 +323,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- if (init)
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+ dev_info_once(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
-static int bcmgenet_mii_probe(struct net_device *dev)
+int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device_node *dn = priv->pdev->dev.of_node;
@@ -345,22 +346,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
priv->old_pause = -1;
if (dn) {
- if (priv->phydev) {
- pr_info("PHY already attached\n");
- return 0;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
- ret = of_phy_register_fixed_link(dn);
- if (ret)
- return ret;
-
- priv->phy_dn = of_node_get(dn);
- }
-
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
phy_flags, priv->phy_interface);
if (!phydev) {
@@ -386,7 +371,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev, true);
+ ret = bcmgenet_mii_config(dev);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
@@ -397,14 +382,11 @@ static int bcmgenet_mii_probe(struct net_device *dev)
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
else
priv->mii_bus->irq[phydev->addr] = PHY_POLL;
- pr_info("attached PHY at address %d [%s]\n",
- phydev->addr, phydev->drv->name);
-
return 0;
}
@@ -490,7 +472,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
+ const char *phy_mode_str = NULL;
+ struct phy_device *phydev = NULL;
char *compat;
+ int phy_mode;
int ret;
compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
@@ -513,17 +498,43 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
/* Fetch the PHY phandle */
priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = of_node_get(dn);
+ }
+
/* Get the link mode */
- priv->phy_interface = of_get_phy_mode(dn);
+ phy_mode = of_get_phy_mode(dn);
+ priv->phy_interface = phy_mode;
- return 0;
-}
+ /* We need to specifically look up whether this PHY interface is internal
+ * or not *before* we even try to probe the PHY driver over MDIO as we
+ * may have shut down the internal PHY for power saving purposes.
+ */
+ if (phy_mode < 0) {
+ ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+ if (ret < 0) {
+ dev_err(kdev, "invalid PHY mode property\n");
+ return ret;
+ }
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
- struct fixed_phy_status *status)
-{
- if (dev && dev->phydev && status)
- status->link = dev->phydev->link;
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+ if (!strcasecmp(phy_mode_str, "internal"))
+ priv->internal_phy = true;
+ }
+
+ /* Make sure we initialize MoCA PHYs with a link down */
+ if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+ phydev = of_phy_find_device(dn);
+ if (phydev)
+ phydev->link = 0;
+ }
return 0;
}
@@ -580,12 +591,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
return -ENODEV;
}
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
- ret = fixed_phy_set_link_update(
- phydev, bcmgenet_fixed_phy_link_update);
- if (!ret)
- phydev->link = 0;
- }
+ /* Make sure we initialize MoCA PHYs with a link down */
+ phydev->link = 0;
+
}
priv->phydev = phydev;
@@ -615,10 +623,6 @@ int bcmgenet_mii_init(struct net_device *dev)
ret = bcmgenet_mii_bus_init(priv);
if (ret)
- goto out_free;
-
- ret = bcmgenet_mii_probe(dev);
- if (ret)
goto out;
return 0;
@@ -626,7 +630,6 @@ int bcmgenet_mii_init(struct net_device *dev)
out:
of_node_put(priv->phy_dn);
mdiobus_unregister(priv->mii_bus);
-out_free:
kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
return ret;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index bf9eb2ecf960..88c1e1a834f8 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2774,8 +2774,7 @@ static const struct macb_config emac_config = {
static const struct macb_config zynqmp_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -2783,8 +2782,7 @@ static const struct macb_config zynqmp_config = {
};
static const struct macb_config zynq_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_NO_GIGABIT_HALF,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 1895b6b2addd..6e1faea00ca8 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -399,7 +399,7 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_JUMBO 0x00000008
+#define MACB_CAPS_JUMBO 0x00000010
/* Bit manipulation macros */
#define MACB_BIT(name) \
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 02e23e6f1424..9b35d142f47a 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -34,6 +34,8 @@ config THUNDER_NIC_VF
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT
+ select PHYLIB
+ select MDIO_OCTEON
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index b961a89dc626..5e541862f65e 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -6,6 +6,7 @@
* as published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -26,7 +27,7 @@
struct lmac {
struct bgx *bgx;
int dmac;
- unsigned char mac[ETH_ALEN];
+ u8 mac[ETH_ALEN];
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -835,18 +836,108 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
}
}
-static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+#ifdef CONFIG_ACPI
+
+static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+ "mac-address", mac, ETH_ALEN);
+ if (ret)
+ goto out;
+
+ if (!is_valid_ether_addr(mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dst, mac, ETH_ALEN);
+out:
+ return ret;
+}
+
+/* Currently only sets the MAC address. */
+static acpi_status bgx_acpi_register_phy(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ struct bgx *bgx = context;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ goto out;
+
+ acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+
+ bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+out:
+ bgx->lmac_count++;
+ return AE_OK;
+}
+
+static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+ void *context, void **ret_val)
+{
+ struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct bgx *bgx = context;
+ char bgx_sel[5];
+
+ snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+ pr_warn("Invalid link device\n");
+ return AE_OK;
+ }
+
+ if (strncmp(string.pointer, bgx_sel, 4)) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ bgx_acpi_register_phy, NULL, bgx, NULL);
+
+ kfree(string.pointer);
+ return AE_CTRL_TERMINATE;
+}
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
{
+ acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
+ return 0;
+}
+
+#else
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ struct device_node *np;
struct device_node *np_child;
u8 lmac = 0;
+ char bgx_sel[5];
+ const char *mac;
- for_each_child_of_node(np, np_child) {
- struct device_node *phy_np;
- const char *mac;
+ /* Get BGX node from DT */
+ snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+ np = of_find_node_by_name(NULL, bgx_sel);
+ if (!np)
+ return -ENODEV;
- phy_np = of_parse_phandle(np_child, "phy-handle", 0);
- if (phy_np)
- bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+ for_each_child_of_node(np, np_child) {
+ struct device_node *phy_np = of_parse_phandle(np_child,
+ "phy-handle", 0);
+ if (!phy_np)
+ continue;
+ bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
mac = of_get_mac_address(np_child);
if (mac)
@@ -858,6 +949,24 @@ static void bgx_init_of(struct bgx *bgx, struct device_node *np)
if (lmac == MAX_LMAC_PER_BGX)
break;
}
+ return 0;
+}
+
+#else
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_OF_MDIO */
+
+static int bgx_init_phy(struct bgx *bgx)
+{
+ if (!acpi_disabled)
+ return bgx_init_acpi_phy(bgx);
+
+ return bgx_init_of_phy(bgx);
}
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -865,8 +974,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
struct device *dev = &pdev->dev;
struct bgx *bgx = NULL;
- struct device_node *np;
- char bgx_sel[5];
u8 lmac;
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
@@ -902,10 +1009,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_vnic[bgx->bgx_id] = bgx;
bgx_get_qlm_mode(bgx);
- snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
- np = of_find_node_by_name(NULL, bgx_sel);
- if (np)
- bgx_init_of(bgx, np);
+ err = bgx_init_phy(bgx);
+ if (err)
+ goto err_enable;
bgx_init_hw(bgx);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 629f75d70353..58de4443eac0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -767,6 +767,7 @@ struct adapter {
bool tid_release_task_busy;
struct dentry *debugfs_root;
+ u32 use_bd; /* Use SGE Back Door interface for reading SGE Contexts */
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6074680bc985..052c660aca80 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = {
"Auto Negotiated"
};
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+ return state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ state == CXGB4_DCB_STATE_HOST;
+}
+
/* Initialize a port's Data Center Bridging state. Typically used after a
* Link Down event.
*/
@@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;
- if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ if (!cxgb4_dcb_state_synced(dcb->state) ||
priority >= CXGB4_MAX_PRIORITY)
*pfccfg = 0;
else
@@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
struct adapter *adap = pi->adapter;
int err;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
priority >= CXGB4_MAX_PRIORITY)
return;
@@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return false;
return pi->dcb.pfcen != 0;
@@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
struct adapter *adap = pi->adapter;
int i;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 0;
for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
*/
static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
{
- return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+ /* Convert app_idtype to firmware format before querying */
+ return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+ app_idtype : 3, app_id, 0);
}
/* Write a new Application User Priority Map for the specified Application ID
@@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
int i, err;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return -EINVAL;
/* DCB info gets thrown away on link up */
@@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;
- if (dcb_subtype && !(dcb->msgs & dcb_subtype))
- return 0;
+ if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+ return 0;
- return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+ return (cxgb4_dcb_state_synced(dcb->state) &&
(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
}
@@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
/* Can't enable DCB if we haven't successfully negotiated it.
*/
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
/* There's currently no mechanism to allow for the firmware DCBX
@@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev,
struct adapter *adap = pi->adapter;
int i, err = 0;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
info->willing = 0;
@@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
struct adapter *adap = pi->adapter;
int i, err = 0;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
if (!pcmd.u.dcb.app_priority.protocolid)
break;
- table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+ table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
table[i].protocol =
be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
table[i].priority =
@@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+ pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
return 0;
}
@@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
*/
pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
+ pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c3c7db41819d..6700dcc9960e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
return 0;
}
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Inst Data PC LS0Stat "
+ "LS0Addr LS0Data LS1Stat LS1Addr LS1Data\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+ (p[9] >> 16) & 0xff, /* Status */
+ p[9] & 0xffff, p[8] >> 16, /* Inst */
+ p[8] & 0xffff, p[7] >> 16, /* Data */
+ p[7] & 0xffff, p[6] >> 16, /* PC */
+ p[2], p[1], p[0], /* LS0 Stat, Addr and Data */
+ p[5], p[4], p[3]); /* LS1 Stat, Addr and Data */
+ }
+ return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Inst Data PC\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %08x %08x %08x\n",
+ p[3] & 0xff, p[2], p[1], p[0]);
+ seq_printf(seq, " %02x %02x%06x %02x%06x %02x%06x\n",
+ (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+ p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+ seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x\n",
+ (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+ p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+ p[6] >> 16);
+ }
+ return 0;
+}
+
static int cim_la_open(struct inode *inode, struct file *file)
{
int ret;
@@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file)
if (ret)
return ret;
- p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
- cfg & UPDBGLACAPTPCONLY_F ?
- cim_la_show_3in1 : cim_la_show);
+ if (is_t6(adap->params.chip)) {
+ /* +1 to account for the integer division of cim_la_size/10 */
+ p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+ 10 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ?
+ cim_la_show_pc_t6 : cim_la_show_t6);
+ } else {
+ p = seq_open_tab(file, adap->params.cim_la_size / 8,
+ 8 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+ cim_la_show);
+ }
if (!p)
return -ENOMEM;
@@ -298,11 +346,11 @@ static int cim_qcfg_show(struct seq_file *seq, void *v)
if (is_t4(adap->params.chip)) {
i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
- wr = obq_wr_t4;
+ wr = obq_wr_t4;
} else {
i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
- wr = obq_wr_t5;
+ wr = obq_wr_t5;
}
}
if (i)
@@ -1895,13 +1943,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+ int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
- int toe_idx = r - eth_entries;
- int rdma_idx = toe_idx - toe_entries;
+ int iscsi_idx = r - eth_entries;
+ int rdma_idx = iscsi_idx - iscsi_entries;
int ciq_idx = rdma_idx - rdma_entries;
int ctrl_idx = ciq_idx - ciq_entries;
int fq_idx = ctrl_idx - ctrl_entries;
@@ -1917,8 +1965,12 @@ do { \
seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
#define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
#define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
if (r < eth_entries) {
int base_qset = r * 4;
@@ -1957,12 +2009,30 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
- } else if (toe_idx < toe_entries) {
- const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
- const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
- int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxCSO:", stats.rx_cso);
+ RL("VLANxtract:", stats.vlan_ex);
+ RL("LROmerged:", stats.lro_merged);
+ RL("LROpackets:", stats.lro_pkts);
+ RL("RxDrops:", stats.rx_drops);
+ TL("TSO:", tso);
+ TL("TxCSO:", tx_cso);
+ TL("VLANins:", vlan_ins);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
+ TL("TxMapErr:", mapping_err);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
+ } else if (iscsi_idx < iscsi_entries) {
+ const struct sge_ofld_rxq *rx =
+ &adap->sge.ofldrxq[iscsi_idx * 4];
+ const struct sge_ofld_txq *tx =
+ &adap->sge.ofldtxq[iscsi_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
- S("QType:", "TOE");
+ S("QType:", "iSCSI");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
@@ -1982,6 +2052,13 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
} else if (rdma_idx < rdma_entries) {
const struct sge_ofld_rxq *rx =
&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2004,6 +2081,13 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
} else if (ciq_idx < ciq_entries) {
const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2019,6 +2103,9 @@ do { \
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:",
adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ RL("RxAN:", stats.an);
+ RL("RxNoMem:", stats.nomem);
+
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2029,6 +2116,8 @@ do { \
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
} else if (fq_idx == 0) {
const struct sge_rspq *evtq = &adap->sge.fw_evtq;
@@ -2044,10 +2133,14 @@ do { \
adap->sge.counter_val[evtq->pktcnt_idx]);
}
#undef R
+#undef RL
#undef T
+#undef TL
#undef S
+#undef R3
+#undef T3
#undef S3
-return 0;
+ return 0;
}
static int sge_queue_entries(const struct adapter *adap)
@@ -2164,6 +2257,73 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+ const struct tid_info *t = &adap->tids;
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+ unsigned int sb;
+
+ if (chip <= CHELSIO_T5)
+ sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+ else
+ sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+ if (sb) {
+ seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u/%u\n",
+ atomic_read(&t->tids_in_use),
+ atomic_read(&t->hash_tids_in_use));
+ } else if (adap->flags & FW_OFLD_CONN) {
+ seq_printf(seq, "TID range: %u..%u/%u..%u",
+ t->aftid_base,
+ t->aftid_end,
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u/%u\n",
+ atomic_read(&t->tids_in_use),
+ atomic_read(&t->hash_tids_in_use));
+ } else {
+ seq_printf(seq, "TID range: %u..%u",
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u\n",
+ atomic_read(&t->hash_tids_in_use));
+ }
+ } else if (t->ntids) {
+ seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+ seq_printf(seq, ", in use: %u\n",
+ atomic_read(&t->tids_in_use));
+ }
+
+ if (t->nstids)
+ seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+ (!t->stid_base &&
+ (chip <= CHELSIO_T5)) ?
+ t->stid_base + 1 : t->stid_base,
+ t->stid_base + t->nstids - 1, t->stids_in_use);
+ if (t->natids)
+ seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+ t->natids - 1, t->atids_in_use);
+ seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+ t->ftid_base + t->nftids - 1);
+ if (t->nsftids)
+ seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+ t->sftid_base, t->sftid_base + t->nsftids - 2,
+ t->sftids_in_use);
+ if (t->ntids)
+ seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+ t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+ t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
@@ -2227,6 +2387,290 @@ static const struct file_operations blocked_fl_fops = {
.llseek = generic_file_llseek,
};
+struct mem_desc {
+ unsigned int base;
+ unsigned int limit;
+ unsigned int idx;
+};
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct mem_desc *)a)->base -
+ ((const struct mem_desc *)b)->base;
+}
+
+static void mem_region_show(struct seq_file *seq, const char *name,
+ unsigned int from, unsigned int to)
+{
+ char buf[40];
+
+ string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+ sizeof(buf));
+ seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
+
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+ static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+ "MC0:", "MC1:"};
+ static const char * const region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+ };
+
+ int i, n;
+ u32 lo, hi, used, alloc;
+ struct mem_desc avail[4];
+ struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
+ struct mem_desc *md = mem;
+ struct adapter *adap = seq->private;
+
+ for (i = 0; i < ARRAY_SIZE(mem); i++) {
+ mem[i].limit = 0;
+ mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ avail[i].base = EDRAM0_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+ avail[i].idx = 0;
+ i++;
+ }
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ avail[i].base = EDRAM1_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+ avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(adap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+ avail[i].idx = 3;
+ i++;
+ }
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+ avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+ avail[i].idx = 2;
+ i++;
+ }
+ }
+ if (!i) /* no memory available */
+ return 0;
+ sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+ (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(adap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ }
+ md++;
+
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region);
+ if (!is_t4(adap->params.chip)) {
+ u32 size = 0;
+ u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+ u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+ if (is_t5(adap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(adap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = adap->vres.ocq.start;
+ if (adap->vres.ocq.size)
+ md->limit = md->base + adap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ md++;
+
+ /* add any address-space holes, there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (avail[n].limit < avail[n + 1].base)
+ (md++)->base = avail[n].limit;
+ if (avail[n].limit)
+ (md++)->base = avail[n].limit;
+
+ n = md - mem;
+ sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+ for (lo = 0; lo < i; lo++)
+ mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+ avail[lo].limit - 1);
+
+ seq_putc(seq, '\n');
+ for (i = 0; i < n; i++) {
+ if (mem[i].idx >= ARRAY_SIZE(region))
+ continue; /* skip holes */
+ if (!mem[i].limit)
+ mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, region[mem[i].idx], mem[i].base,
+ mem[i].limit);
+ }
+
+ seq_putc(seq, '\n');
+ lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP RAM:", lo, hi);
+
+ lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+ lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+ seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+ PMRXMAXPAGE_G(lo),
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+ (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+ lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+ seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+ PMTXMAXPAGE_G(lo),
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+ hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+ seq_printf(seq, "%u p-structs\n\n",
+ t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ for (i = 0; i < adap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq,
+ "Loopback %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ return 0;
+}
+
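The hole accounting above depends only on the ranges being sorted by base
address; once they are, a hole exists wherever one range ends before the next
begins. A stand-alone restatement in plain C (printf replaces the seq_file
output; names are illustrative):

	#include <stdio.h>

	struct range { unsigned int base, limit; };	/* limit is exclusive */

	/* assumes r[] is sorted by base, as after sort(..., mem_desc_cmp, ...) */
	static void print_holes(const struct range *r, int n)
	{
		int i;

		for (i = 0; i < n - 1; i++)
			if (r[i].limit < r[i + 1].base)
				printf("hole: %#x-%#x\n",
				       r[i].limit, r[i + 1].base - 1);
	}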
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+ .owner = THIS_MODULE,
+ .open = meminfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2293,7 +2737,9 @@ int t4_setup_debugfs(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
+ { "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
+ { "meminfo", &meminfo_fops, S_IRUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
@@ -2341,6 +2787,8 @@ int t4_setup_debugfs(struct adapter *adap)
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
&flash_debugfs_fops, adap->params.sf_size);
+ debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+ adap->debugfs_root, &adap->use_bd);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 687acf71fa15..5eedb98ff581 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -925,6 +925,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
const struct firmware *fw;
struct adapter *adap = netdev2adap(netdev);
unsigned int mbox = PCIE_FW_MASTER_M + 1;
+ u32 pcie_fw;
+ unsigned int master;
+ u8 master_vld = 0;
+
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ master = PCIE_FW_MASTER_G(pcie_fw);
+ if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+ master_vld = 1;
+ /* If csiostor is the master, return */
+ if (master_vld && (master != adap->pf)) {
+ dev_warn(adap->pdev_dev,
+ "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+ return -EOPNOTSUPP;
+ }
ef->data[sizeof(ef->data) - 1] = '\0';
ret = request_firmware(&fw, ef->data, adap->pdev_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 351f3b1bf800..f35dd2284d40 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1548,7 +1548,7 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
t->stid_tab[stid].data = data;
stid -= t->nstids;
stid += t->sftid_base;
- t->stids_in_use++;
+ t->sftids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1573,10 +1573,14 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
- if (family == PF_INET)
- t->stids_in_use--;
- else
- t->stids_in_use -= 4;
+ if (stid < t->nstids) {
+ if (family == PF_INET)
+ t->stids_in_use--;
+ else
+ t->stids_in_use -= 4;
+ } else {
+ t->sftids_in_use--;
+ }
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1654,20 +1658,25 @@ static void process_tid_release_list(struct work_struct *work)
*/
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
- void *old;
struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
- old = t->tid_tab[tid];
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ if (t->hash_base && (tid >= t->hash_base))
+ atomic_dec(&t->hash_tids_in_use);
+ else
+ atomic_dec(&t->tids_in_use);
+ }
+
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
if (likely(skb)) {
- t->tid_tab[tid] = NULL;
mk_tid_release(skb, chan, tid);
t4_ofld_send(adap, skb);
} else
cxgb4_queue_tid_release(t, chan, tid);
- if (old)
- atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
@@ -1702,9 +1711,11 @@ static int tid_init(struct tid_info *t)
spin_lock_init(&t->atid_lock);
t->stids_in_use = 0;
+ t->sftids_in_use = 0;
t->afree = NULL;
t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
+ atomic_set(&t->hash_tids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
@@ -4551,6 +4562,32 @@ static void free_some_resources(struct adapter *adapter)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+ int ver, chip = 0;
+ u16 device_id;
+
+ /* Retrieve adapter's device ID */
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ ver = device_id >> 12;
+ switch (ver) {
+ case CHELSIO_T4:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+ break;
+ case CHELSIO_T5:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ break;
+ case CHELSIO_T6:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ break;
+ default:
+ dev_err(&pdev->dev, "Device %d is not supported\n",
+ device_id);
+ return -EINVAL;
+ }
+ return chip;
+}
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err, s_qpp, qpp, num_seg;
@@ -4558,6 +4595,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bool highdma = false;
struct adapter *adapter = NULL;
void __iomem *regs;
+ u32 whoami, pl_rev;
+ enum chip_type chip;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4586,7 +4625,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_unmap_bar0;
/* We control everything through one PF */
- func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
+ whoami = readl(regs + PL_WHOAMI_A);
+ pl_rev = REV_G(readl(regs + PL_REV_A));
+ chip = get_chip_type(pdev, pl_rev);
+ func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (func != ent->driver_data) {
iounmap(regs);
pci_disable_device(pdev);
@@ -4757,7 +4800,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
cfg_queues(adapter);
- adapter->l2t = t4_init_l2t();
+ adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
if (!adapter->l2t) {
/* We tolerate a lack of L2T, giving up some functionality */
dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
@@ -4782,6 +4825,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->params.offload = 0;
}
+ if (is_offload(adapter)) {
+ if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+ u32 hash_base, hash_reg;
+
+ if (chip <= CHELSIO_T5) {
+ hash_reg = LE_DB_TID_HASHBASE_A;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ } else {
+ hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base;
+ }
+ }
+ }
+
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b27897d4f787..c3a8be5541e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -96,6 +96,7 @@ struct tid_info {
unsigned long *stid_bmap;
unsigned int nstids;
unsigned int stid_base;
+ unsigned int hash_base;
union aopen_entry *atid_tab;
unsigned int natids;
@@ -116,8 +117,12 @@ struct tid_info {
spinlock_t stid_lock;
unsigned int stids_in_use;
+ unsigned int sftids_in_use;
+ /* TIDs in the TCAM */
atomic_t tids_in_use;
+ /* TIDs in the HASH */
+ atomic_t hash_tids_in_use;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
unsigned int tid)
{
t->tid_tab[tid] = data;
- atomic_inc(&t->tids_in_use);
+ if (t->hash_base && (tid >= t->hash_base))
+ atomic_inc(&t->hash_tids_in_use);
+ else
+ atomic_inc(&t->tids_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
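The TCAM/hash split used here, and mirrored in cxgb4_remove_tid(), boils down
to a single predicate. A hypothetical helper (not part of the patch) makes the
rule explicit:

	/* TIDs at or above hash_base live in the on-chip hash table; everything
	 * below sits in the TCAM. A hash_base of 0 means hashing is disabled,
	 * so every TID is accounted to the TCAM counter.
	 */
	static inline bool cxgb4_tid_is_hashed(const struct tid_info *t,
					       unsigned int tid)
	{
		return t->hash_base && tid >= t->hash_base;
	}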
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 252efc29321f..ac27898c6ab0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -51,24 +51,17 @@
#define VLAN_NONE 0xfff
/* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR (1 << 12)
-
-enum {
- L2T_STATE_VALID, /* entry is up to date */
- L2T_STATE_STALE, /* entry may be used but needs revalidation */
- L2T_STATE_RESOLVING, /* entry needs address resolution */
- L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
- /* when state is one of the below the entry is not hashed */
- L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
- L2T_STATE_UNUSED /* entry not in use */
-};
+#define SYNC_WR_S 12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F SYNC_WR_V(1)
struct l2t_data {
+ unsigned int l2t_start; /* start index of our piece of the L2T */
+ unsigned int l2t_size; /* number of entries in l2tab */
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[L2T_SIZE];
+ struct l2t_entry l2tab[0]; /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
/*
* To avoid having to check address families we do not allow v4 and v6
* neighbors to be on the same hash chain. We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second. We need at least two
+ * entries in our L2T for this scheme to work.
*/
enum {
- L2T_SZ_HALF = L2T_SIZE / 2,
- L2T_HASH_MASK = L2T_SZ_HALF - 1
+ L2T_MIN_HASH_BUCKETS = 2,
};
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+ int ifindex)
{
- return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+ unsigned int l2t_size_half = d->l2t_size / 2;
+
+ return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+ int ifindex)
{
+ unsigned int l2t_size_half = d->l2t_size / 2;
u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
- return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+ return (l2t_size_half +
+ (jhash_2words(xor, ifindex, 0) % l2t_size_half));
}
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+ int addr_len, int ifindex)
{
- return addr_len == 4 ? arp_hash(addr, ifindex) :
- ipv6_hash(addr, ifindex);
+ return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+ ipv6_hash(d, addr, ifindex);
}
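A small stand-alone illustration of the family-split bucketing described
above; the modulo hash stands in for jhash_2words(), so the exact bucket
values are illustrative:

	#include <stdio.h>

	/* v4 keys land in [0, size/2), v6 keys in [size/2, size), so the two
	 * address families can never share a hash chain. */
	static unsigned int bucket(unsigned int hash, unsigned int size, int is_v6)
	{
		unsigned int half = size / 2;

		return is_v6 ? half + hash % half : hash % half;
	}

	int main(void)
	{
		/* prints "25 57" for a 64-entry table */
		printf("%u %u\n", bucket(12345, 64, 0), bucket(12345, 64, 1));
		return 0;
	}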
/*
@@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
*/
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
+ struct l2t_data *d = adap->l2t;
+ unsigned int l2t_idx = e->idx + d->l2t_start;
struct sk_buff *skb;
struct cpl_l2t_write_req *req;
@@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
- e->idx | (sync ? F_SYNC_WR : 0) |
+ l2t_idx | (sync ? SYNC_WR_F : 0) |
TID_QID_V(adap->sge.fw_evtq.abs_id)));
req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
- req->l2t_idx = htons(e->idx);
+ req->l2t_idx = htons(l2t_idx);
req->vlan = htons(e->vlan);
if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e)
*/
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
+ struct l2t_data *d = adap->l2t;
unsigned int tid = GET_TID(rpl);
- unsigned int idx = tid & (L2T_SIZE - 1);
+ unsigned int l2t_idx = tid % L2T_SIZE;
if (unlikely(rpl->status != CPL_ERR_NONE)) {
dev_err(adap->pdev_dev,
"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
- rpl->status, idx);
+ rpl->status, l2t_idx);
return;
}
- if (tid & F_SYNC_WR) {
- struct l2t_entry *e = &adap->l2t->l2tab[idx];
+ if (tid & SYNC_WR_F) {
+ struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
spin_lock(&e->lock);
if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d)
return NULL;
/* there's definitely a free entry */
- for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+ for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
if (atomic_read(&e->refcnt) == 0)
goto found;
@@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *)neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(addr, addr_len, ifidx);
+ int hash = addr_hash(d, addr, addr_len, ifidx);
if (neigh->dev->flags & IFF_LOOPBACK)
lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(addr, addr_len, ifidx);
+ int hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
return write_l2e(adap, e, 0);
}
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
+ unsigned int l2t_size;
int i;
struct l2t_data *d;
- d = t4_alloc_mem(sizeof(*d));
+ if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+ return NULL;
+ l2t_size = l2t_end - l2t_start + 1;
+ if (l2t_size < L2T_MIN_HASH_BUCKETS)
+ return NULL;
+
+ d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
if (!d)
return NULL;
+ d->l2t_start = l2t_start;
+ d->l2t_size = l2t_size;
+
d->rover = d->l2tab;
- atomic_set(&d->nfree, L2T_SIZE);
+ atomic_set(&d->nfree, l2t_size);
rwlock_init(&d->lock);
- for (i = 0; i < L2T_SIZE; ++i) {
+ for (i = 0; i < d->l2t_size; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
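The zero-length l2tab[] member relies on the classic trailing-array idiom: one
allocation covers the header plus however many entries the firmware granted.
A self-contained restatement in plain C, with calloc standing in for
t4_alloc_mem (names are illustrative):

	#include <stdlib.h>

	struct entry { int idx; };

	struct table {
		unsigned int size;
		struct entry tab[];	/* must be last */
	};

	static struct table *table_alloc(unsigned int n)
	{
		/* one allocation covers the header and all n entries */
		struct table *t = calloc(1, sizeof(*t) + n * sizeof(t->tab[0]));

		if (t)
			t->size = n;
		return t;
	}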
@@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void)
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
- struct l2t_entry *l2tab = seq->private;
+ struct l2t_data *d = seq->private;
- return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+ return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}
static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
"Ethernet address VLAN/P LP State Users Port\n");
else {
char ip[60];
+ struct l2t_data *d = seq->private;
struct l2t_entry *e = v;
spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
else
sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
- e->idx, ip, e->dmac,
+ e->idx + d->l2t_start, ip, e->dmac,
e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
l2e_state(e), atomic_read(&e->refcnt),
e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file)
struct adapter *adap = inode->i_private;
struct seq_file *seq = file->private_data;
- seq->private = adap->l2t->l2tab;
+ seq->private = adap->l2t;
}
return rc;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index a30126ce90cb..b38dc526aad5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,20 @@
#include <linux/if_ether.h>
#include <linux/atomic.h>
+enum { L2T_SIZE = 4096 }; /* # of L2T entries */
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+ L2T_STATE_NOARP, /* Netdev down or removed */
+
+ /* when state is one of the below, the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
struct adapter;
struct l2t_data;
struct neighbour;
@@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl;
*/
struct l2t_entry {
u16 state; /* entry state */
- u16 idx; /* entry index */
+ u16 idx; /* entry index within the in-memory table */
u32 addr[4]; /* next hop IP or IPv6 address */
int ifindex; /* neighbor's net_device's ifindex */
struct neighbour *neigh; /* associated neighbour */
@@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
extern const struct file_operations t4_l2t_fops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 942db078f33a..78f446c58422 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid;
+ u32 wr_mid, ctrl0;
u64 cntrl, *end;
int qidx, credits;
unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@ out_free: dev_kfree_skb_any(skb);
#endif /* CONFIG_CHELSIO_T4_FCOE */
}
- cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
- TXPKT_INTF_V(pi->tx_chan) |
- TXPKT_PF_V(adap->pf));
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (is_t4(adap->params.chip))
+ ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+ else
+ ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+ cpl->ctrl0 = htonl(ctrl0);
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1418,18 +1424,17 @@ static void restart_ctrlq(unsigned long data)
struct fw_wr_hdr *wr;
unsigned int ndesc = skb->priority; /* previously saved */
- /*
- * Write descriptors and free skbs outside the lock to limit
+ written += ndesc;
+ /* Write descriptors and free skbs outside the lock to limit
* wait times. q->full is still set so new skbs will be queued.
*/
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
- written += ndesc;
- txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
unsigned long old = q->q.stops;
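The restart_ctrlq() reordering above is a locking fix, not a cleanup: the producer index must be read and advanced while sendq.lock is still held, so that only the slot's contents are written after unlock; reading q->q.desc[pidx] after the unlock, as the old code did, risks another writer advancing pidx first. A minimal userspace model of the reserve-under-lock, fill-outside-lock pattern (types and names are illustrative, not the driver's):

#include <pthread.h>

struct ring {
        pthread_mutex_t lock;
        unsigned int pidx;               /* producer index */
        unsigned int size;               /* number of descriptors */
        unsigned long long desc[256];
};

static unsigned long long *ring_reserve(struct ring *r, unsigned int n)
{
        unsigned long long *slot;

        pthread_mutex_lock(&r->lock);
        slot = &r->desc[r->pidx];               /* claim the slot... */
        r->pidx = (r->pidx + n) % r->size;      /* ...and advance, under the lock */
        pthread_mutex_unlock(&r->lock);

        return slot;    /* safe to fill unlocked: no one else owns it */
}

int main(void)
{
        static struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER, .size = 256 };

        *ring_reserve(&r, 1) = 0xabcdULL;       /* fill outside the lock */
        return 0;
}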
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b52aae7ec86..91750ad580ae 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -345,6 +345,43 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
FW_CMD_MAX_TIMEOUT);
}
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+ u32 edc_ecc_err_addr_reg;
+ u32 rdata_reg;
+
+ if (is_t4(adap->params.chip)) {
+ CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+ return 0;
+ }
+ if (idx != 0 && idx != 1) {
+ CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+ return 0;
+ }
+
+ edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+ rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+ CH_WARN(adap,
+ "edc%d err addr 0x%x: 0x%x.\n",
+ idx, edc_ecc_err_addr_reg,
+ t4_read_reg(adap, edc_ecc_err_addr_reg));
+ CH_WARN(adap,
+ "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+ rdata_reg,
+ (unsigned long long)t4_read_reg64(adap, rdata_reg),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+ return 0;
+}
+
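The new helper leans on the renamed EDC_T5_REG()/EDC_T5_STRIDE macros (see the t4_regs.h hunk further down) to address the same register in either memory controller. A quick standalone check of the stride arithmetic; EDC_T50_BASE_ADDR is assumed to be 0x50000 here, as it is not shown in this patch:

#include <stdio.h>

#define EDC_T50_BASE_ADDR 0x50000    /* assumed; not shown in this patch */
#define EDC_T51_BASE_ADDR 0x50800
#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_T5_REG(reg, idx) ((reg) + EDC_T5_STRIDE * (idx))

#define EDC_H_ECC_ERR_ADDR_A 0x50084

int main(void)
{
        /* idx 0 hits EDC0's copy, idx 1 the same register in EDC1 */
        printf("edc0: 0x%x\n", EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, 0)); /* 0x50084 */
        printf("edc1: 0x%x\n", EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, 1)); /* 0x50884 */
        return 0;
}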
/**
* t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
* @adap: the adapter
@@ -1322,9 +1359,10 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
};
static const unsigned int t6_reg_ranges[] = {
- 0x1008, 0x114c,
+ 0x1008, 0x1124,
+ 0x1138, 0x114c,
0x1180, 0x11b4,
- 0x11fc, 0x1250,
+ 0x11fc, 0x1254,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
@@ -1345,18 +1383,18 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x5a80, 0x5a9c,
0x5b94, 0x5bfc,
0x5c10, 0x5ec0,
- 0x5ec8, 0x5ec8,
+ 0x5ec8, 0x5ecc,
0x6000, 0x6040,
- 0x6058, 0x6154,
+ 0x6058, 0x619c,
0x7700, 0x7798,
0x77c0, 0x7880,
0x78cc, 0x78fc,
0x7b00, 0x7c54,
0x7d00, 0x7efc,
- 0x8dc0, 0x8de0,
+ 0x8dc0, 0x8de4,
0x8df8, 0x8e84,
0x8ea0, 0x8f88,
- 0x8fb8, 0x911c,
+ 0x8fb8, 0x9124,
0x9400, 0x9470,
0x9600, 0x971c,
0x9800, 0x9808,
@@ -1371,20 +1409,21 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9f00, 0x9f6c,
0x9f80, 0xa020,
0xd004, 0xd03c,
+ 0xd100, 0xd118,
+ 0xd200, 0xd31c,
0xdfc0, 0xdfe0,
0xe000, 0xf008,
0x11000, 0x11014,
- 0x11048, 0x11110,
- 0x11118, 0x1117c,
- 0x11190, 0x11260,
+ 0x11048, 0x1117c,
+ 0x11190, 0x11270,
0x11300, 0x1130c,
- 0x12000, 0x1205c,
+ 0x12000, 0x1206c,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
- 0x19238, 0x192b8,
+ 0x19238, 0x192bc,
0x193f8, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
@@ -1461,12 +1500,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30070,
- 0x30100, 0x3015c,
- 0x30190, 0x301d0,
- 0x30200, 0x30318,
+ 0x30100, 0x301d0,
+ 0x30200, 0x30320,
0x30400, 0x3052c,
0x30540, 0x3061c,
- 0x30800, 0x3088c,
+ 0x30800, 0x30890,
0x308c0, 0x30908,
0x30910, 0x309b8,
0x30a00, 0x30a04,
@@ -1539,12 +1577,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x33c24, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34070,
- 0x34100, 0x3415c,
- 0x34190, 0x341d0,
- 0x34200, 0x34318,
+ 0x34100, 0x341d0,
+ 0x34200, 0x34320,
0x34400, 0x3452c,
0x34540, 0x3461c,
- 0x34800, 0x3488c,
+ 0x34800, 0x34890,
0x348c0, 0x34908,
0x34910, 0x349b8,
0x34a00, 0x34a04,
@@ -3281,6 +3318,8 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
if (v & ECC_CE_INT_CAUSE_F) {
u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
+ t4_edc_err_read(adapter, idx);
+
t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
if (printk_ratelimit())
dev_warn(adapter->pdev_dev,
@@ -3488,7 +3527,9 @@ int t4_slow_intr_handler(struct adapter *adapter)
void t4_intr_enable(struct adapter *adapter)
{
u32 val = 0;
- u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
@@ -3513,7 +3554,9 @@ void t4_intr_enable(struct adapter *adapter)
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
@@ -3687,6 +3730,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
return 0;
}
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+ return (adap->flags & FW_OK) || !adap->use_bd;
+}
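t4_use_ldst() centralizes the firmware-vs-backdoor decision that the hunks below apply to every TP PIO accessor: take the firmware LDST path when the firmware is up, or whenever backdoor access was not explicitly requested. A standalone truth table (the FW_OK bit value is assumed for the sketch):

#include <stdbool.h>
#include <stdio.h>

#define FW_OK 0x1    /* assumed flag bit for this sketch */

/* Mirror of the helper's decision: prefer the firmware LDST path unless
 * backdoor access was requested and the firmware is down. */
static bool use_ldst(unsigned int flags, bool use_bd)
{
        return (flags & FW_OK) || !use_bd;
}

int main(void)
{
        printf("%d %d %d %d\n",
               use_ldst(FW_OK, false),  /* 1: LDST */
               use_ldst(FW_OK, true),   /* 1: LDST (healthy fw wins) */
               use_ldst(0, false),      /* 1: LDST (backdoor not requested) */
               use_ldst(0, true));      /* 0: backdoor register access */
        return 0;
}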
+
/**
* t4_fw_tp_pio_rw - Access TP PIO through LDST
* @adap: the adapter
@@ -3730,7 +3778,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
- if (adap->flags & FW_OK)
+ if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
else
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +3808,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
- if (adap->flags & FW_OK)
+ if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
else
t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +3837,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, valp, 1,
TP_RSS_PF0_CONFIG_A + index, 1);
else
@@ -3829,7 +3877,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- if (adapter->flags & FW_OK) {
+ if (t4_use_ldst(adapter)) {
t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
} else {
@@ -3850,7 +3898,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +3916,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +3972,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
*/
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
- /* T6 and later has 2 channels */
- if (adap->params.arch.nchan == NCHAN) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_cong_drops, 8,
- TP_MIB_TNL_CNG_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_tx_drops, 4,
- TP_MIB_TNL_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_vlan_drops, 4,
- TP_MIB_OFD_VLN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp6_in_errs, 4,
- TP_MIB_TCP_V6IN_ERR_0_A);
- } else {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_cong_drops, 2,
- TP_MIB_TNL_CNG_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_chan_drops, 2,
- TP_MIB_OFD_CHN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_vlan_drops, 2,
- TP_MIB_OFD_VLN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
- }
+ int nchan = adap->params.arch.nchan;
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
&st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
}
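Both this refactor and the cpl_stats one below collapse the per-chip branches into one sequence parameterized by adap->params.arch.nchan, since the MIB layout is shared and only the channel count differs. The underlying access is an index/data indirect read; a toy standalone model of that pattern (the hidden table and names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Toy device exposing a hidden MIB table through an index/data pair. */
static uint32_t mib_table[64];
static uint32_t mib_index;

static void write_index(uint32_t idx) { mib_index = idx; }
static uint32_t read_data(void) { return mib_table[mib_index & 63]; }

/* The block-read pattern behind t4_read_indirect() callers: write the
 * index register, read the data register, repeat n times. */
static void read_indirect(uint32_t *vals, unsigned int n, uint32_t start)
{
        for (unsigned int i = 0; i < n; i++) {
                write_index(start + i);
                vals[i] = read_data();
        }
}

int main(void)
{
        uint32_t errs[4];

        for (int i = 0; i < 64; i++)
                mib_table[i] = 100 + i;
        read_indirect(errs, 4, 8);   /* e.g. nchan = 4 counters at offset 8 */
        printf("%u %u %u %u\n", errs[0], errs[1], errs[2], errs[3]);
        return 0;
}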
@@ -3974,16 +4004,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
*/
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
- /* T6 and later has 2 channels */
- if (adap->params.arch.nchan == NCHAN) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
- 8, TP_MIB_CPL_IN_REQ_0_A);
- } else {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
- 2, TP_MIB_CPL_IN_REQ_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
- 2, TP_MIB_CPL_OUT_RSP_0_A);
- }
+ int nchan = adap->params.arch.nchan;
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ nchan, TP_MIB_CPL_IN_REQ_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+ nchan, TP_MIB_CPL_OUT_RSP_0_A);
}
/**
@@ -6294,7 +6321,7 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- if (adap->flags & FW_OK) {
+ if (t4_use_ldst(adap)) {
t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A, 1);
t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c8488f430d19..640369df8b3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
- L2T_SIZE = 4096, /* # of L2T entries */
PM_NSTATS = 5, /* # of PM stats */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 132cb8fc0bf7..b99144afd4ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -660,6 +660,9 @@ struct cpl_tx_pkt {
#define TXPKT_OVLAN_IDX_S 12
#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+#define TXPKT_T5_OVLAN_IDX_S 12
+#define TXPKT_T5_OVLAN_IDX_V(x) ((x) << TXPKT_T5_OVLAN_IDX_S)
+
#define TXPKT_INTF_S 16
#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index d7ca106927b0..8353a6cbfcc2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -155,6 +157,22 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+
+ /* T6 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x6001),
+ CH_PCI_ID_TABLE_FENTRY(0x6002),
+ CH_PCI_ID_TABLE_FENTRY(0x6003),
+ CH_PCI_ID_TABLE_FENTRY(0x6004),
+ CH_PCI_ID_TABLE_FENTRY(0x6005),
+ CH_PCI_ID_TABLE_FENTRY(0x6006),
+ CH_PCI_ID_TABLE_FENTRY(0x6007),
+ CH_PCI_ID_TABLE_FENTRY(0x6009),
+ CH_PCI_ID_TABLE_FENTRY(0x600d),
+ CH_PCI_ID_TABLE_FENTRY(0x6010),
+ CH_PCI_ID_TABLE_FENTRY(0x6011),
+ CH_PCI_ID_TABLE_FENTRY(0x6014),
+ CH_PCI_ID_TABLE_FENTRY(0x6015),
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 375a825573b0..4d2c9290c7dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -136,6 +136,20 @@
#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
& INGPACKBOUNDARY_M)
+#define VFIFO_ENABLE_S 10
+#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S)
+#define VFIFO_ENABLE_F VFIFO_ENABLE_V(1U)
+
+#define SGE_DBVFIFO_BADDR_A 0x1138
+
+#define DBVFIFO_SIZE_S 6
+#define DBVFIFO_SIZE_M 0xfffU
+#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)
+
+#define T6_DBVFIFO_SIZE_S 0
+#define T6_DBVFIFO_SIZE_M 0x1fffU
+#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+
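These additions follow the file's shift/mask convention: FIELD_S is the bit offset, FIELD_M the mask applied after shifting, and FIELD_G(x) extracts the field. The T5 and T6 doorbell-FIFO sizes live in different bits of the same register, which is why both sets exist; a standalone check with a hypothetical register value:

#include <stdio.h>

#define DBVFIFO_SIZE_S 6
#define DBVFIFO_SIZE_M 0xfffU
#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)

#define T6_DBVFIFO_SIZE_S 0
#define T6_DBVFIFO_SIZE_M 0x1fffU
#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)

int main(void)
{
        unsigned int reg = 0x00012345;  /* hypothetical SGE_DBVFIFO_SIZE_A value */

        /* Same register, different field layout per chip generation */
        printf("T5 size: %u\n", DBVFIFO_SIZE_G(reg));    /* bits 17:6 -> 0x48d */
        printf("T6 size: %u\n", T6_DBVFIFO_SIZE_G(reg)); /* bits 12:0 -> 0x345 */
        return 0;
}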
#define GLOBALENABLE_S 0
#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
#define GLOBALENABLE_F GLOBALENABLE_V(1U)
@@ -303,6 +317,8 @@
#define SGE_FL_BUFFER_SIZE7_A 0x1060
#define SGE_FL_BUFFER_SIZE8_A 0x1064
+#define SGE_IMSG_CTXT_BADDR_A 0x1088
+#define SGE_FLM_CACHE_BADDR_A 0x108c
#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
#define THRESHOLD_0_S 24
@@ -338,6 +354,11 @@
#define EGRTHRESHOLDPACKING_G(x) \
(((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+#define T6_EGRTHRESHOLDPACKING_S 16
+#define T6_EGRTHRESHOLDPACKING_M 0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+ (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
#define SGE_TIMESTAMP_LO_A 0x1098
#define SGE_TIMESTAMP_HI_A 0x109c
@@ -352,6 +373,7 @@
#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
#define SGE_DBFIFO_STATUS_A 0x10a4
+#define SGE_DBVFIFO_SIZE_A 0x113c
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
@@ -864,6 +886,10 @@
/* registers for module MA */
#define MA_EDRAM0_BAR_A 0x77c0
+#define EDRAM0_BASE_S 16
+#define EDRAM0_BASE_M 0xfffU
+#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)
+
#define EDRAM0_SIZE_S 0
#define EDRAM0_SIZE_M 0xfffU
#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
@@ -871,6 +897,10 @@
#define MA_EDRAM1_BAR_A 0x77c4
+#define EDRAM1_BASE_S 16
+#define EDRAM1_BASE_M 0xfffU
+#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M)
+
#define EDRAM1_SIZE_S 0
#define EDRAM1_SIZE_M 0xfffU
#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
@@ -878,6 +908,11 @@
#define MA_EXT_MEMORY_BAR_A 0x77c8
+#define EXT_MEM_BASE_S 16
+#define EXT_MEM_BASE_M 0xfffU
+#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S)
+#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M)
+
#define EXT_MEM_SIZE_S 0
#define EXT_MEM_SIZE_M 0xfffU
#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
@@ -885,6 +920,10 @@
#define MA_EXT_MEMORY1_BAR_A 0x7808
+#define EXT_MEM1_BASE_S 16
+#define EXT_MEM1_BASE_M 0xfffU
+#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
+
#define EXT_MEM1_SIZE_S 0
#define EXT_MEM1_SIZE_M 0xfffU
#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
@@ -892,6 +931,10 @@
#define MA_EXT_MEMORY0_BAR_A 0x77c8
+#define EXT_MEM0_BASE_S 16
+#define EXT_MEM0_BASE_M 0xfffU
+#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M)
+
#define EXT_MEM0_SIZE_S 0
#define EXT_MEM0_SIZE_M 0xfffU
#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
@@ -973,6 +1016,10 @@
/* registers for module CIM */
#define CIM_BOOT_CFG_A 0x7b00
+#define CIM_SDRAM_BASE_ADDR_A 0x7b14
+#define CIM_SDRAM_ADDR_SIZE_A 0x7b18
+#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c
+#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20
#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
#define BOOTADDR_M 0xffffff00U
@@ -1231,6 +1278,33 @@
#define TP_OUT_CONFIG_A 0x7d04
#define TP_GLOBAL_CONFIG_A 0x7d08
+#define TP_CMM_TCB_BASE_A 0x7d10
+#define TP_CMM_MM_BASE_A 0x7d14
+#define TP_CMM_TIMER_BASE_A 0x7d18
+#define TP_PMM_TX_BASE_A 0x7d20
+#define TP_PMM_RX_BASE_A 0x7d28
+#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c
+#define TP_PMM_RX_MAX_PAGE_A 0x7d30
+#define TP_PMM_TX_PAGE_SIZE_A 0x7d34
+#define TP_PMM_TX_MAX_PAGE_A 0x7d38
+#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c
+
+#define PMRXNUMCHN_S 31
+#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S)
+#define PMRXNUMCHN_F PMRXNUMCHN_V(1U)
+
+#define PMTXNUMCHN_S 30
+#define PMTXNUMCHN_M 0x3U
+#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M)
+
+#define PMTXMAXPAGE_S 0
+#define PMTXMAXPAGE_M 0x1fffffU
+#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M)
+
+#define PMRXMAXPAGE_S 0
+#define PMRXMAXPAGE_M 0x1fffffU
+#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M)
+
#define DBGLAMODE_S 14
#define DBGLAMODE_M 0x3U
#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
@@ -1338,6 +1412,9 @@
#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
#define TP_RSS_LKP_TABLE_A 0x7dec
+#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60
+#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64
+#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68
#define LKPTBLROWVLD_S 31
#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
@@ -1483,6 +1560,11 @@
#define TP_MIB_RQE_DFR_PKT_A 0x64
#define ULP_TX_INT_CAUSE_A 0x8dcc
+#define ULP_TX_TPT_LLIMIT_A 0x8dd4
+#define ULP_TX_TPT_ULIMIT_A 0x8dd8
+#define ULP_TX_PBL_LLIMIT_A 0x8ddc
+#define ULP_TX_PBL_ULIMIT_A 0x8de0
+#define ULP_TX_ERR_TABLE_BASE_A 0x8e04
#define PBL_BOUND_ERR_CH3_S 31
#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
@@ -2247,12 +2329,32 @@
#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
#define MATCHSRAM_F MATCHSRAM_V(1U)
+#define MPS_RX_PG_RSV0_A 0x11010
+#define MPS_RX_PG_RSV4_A 0x11020
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
+#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
+#define USED_S 16
+#define USED_M 0x7ffU
+#define USED_G(x) (((x) >> USED_S) & USED_M)
+
+#define ALLOC_S 0
+#define ALLOC_M 0x7ffU
+#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M)
+
+#define T5_USED_S 16
+#define T5_USED_M 0xfffU
+#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M)
+
+#define T5_ALLOC_S 0
+#define T5_ALLOC_M 0xfffU
+#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M)
+
#define DMACH_S 0
#define DMACH_M 0xffffU
#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
@@ -2410,8 +2512,21 @@
#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U)
#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_LLIMIT_A 0x1915c
+#define ULP_RX_ISCSI_ULIMIT_A 0x19160
#define ULP_RX_ISCSI_TAGMASK_A 0x19164
#define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_TDDP_LLIMIT_A 0x1916c
+#define ULP_RX_TDDP_ULIMIT_A 0x19170
+#define ULP_RX_STAG_LLIMIT_A 0x1917c
+#define ULP_RX_STAG_ULIMIT_A 0x19180
+#define ULP_RX_RQ_LLIMIT_A 0x19184
+#define ULP_RX_RQ_ULIMIT_A 0x19188
+#define ULP_RX_PBL_LLIMIT_A 0x1918c
+#define ULP_RX_PBL_ULIMIT_A 0x19190
+#define ULP_RX_CTX_BASE_A 0x19194
+#define ULP_RX_RQUDP_LLIMIT_A 0x191a4
+#define ULP_RX_RQUDP_ULIMIT_A 0x191a8
#define ULP_RX_LA_CTL_A 0x1923c
#define ULP_RX_LA_RDPTR_A 0x19240
#define ULP_RX_LA_RDDATA_A 0x19244
@@ -2473,6 +2588,10 @@
#define SOURCEPF_M 0x7U
#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+#define T6_SOURCEPF_S 9
+#define T6_SOURCEPF_M 0x7U
+#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M)
+
#define PL_INT_CAUSE_A 0x1940c
#define ULP_TX_S 27
@@ -2612,7 +2731,20 @@
#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
#define T6_LIPMISS_F T6_LIPMISS_V(1U)
+#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_TID_BASE_A 0x19c30
+#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
+
+#define HASHEN_S 20
+#define HASHEN_V(x) ((x) << HASHEN_S)
+#define HASHEN_F HASHEN_V(1U)
#define REQQPARERR_S 16
#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
@@ -2634,6 +2766,10 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
+#define BASEADDR_S 3
+#define BASEADDR_M 0x1fffffffU
+#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M)
+
#define TCAMINTPERR_S 13
#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
#define TCAMINTPERR_F TCAMINTPERR_V(1U)
@@ -2740,10 +2876,11 @@
#define EDC_H_BIST_DATA_PATTERN_A 0x50010
#define EDC_H_BIST_STATUS_RDATA_A 0x50028
+#define EDC_H_ECC_ERR_ADDR_A 0x50084
#define EDC_T51_BASE_ADDR 0x50800
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
#define PL_VF_REV_A 0x4
#define PL_VF_WHOAMI_A 0x0
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index ad53e5ad2acd..fa3786a9d30e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
rspq->unhandled_irqs++;
val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
- if (is_t4(rspq->adapter->params.chip)) {
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!rspq->bar2_addr)) {
t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V((u32)rspq->cntxt_id));
@@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter)
}
val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
- if (is_t4(adapter->params.chip))
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!intrq->bar2_addr)) {
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V(intrq->cntxt_id));
- else {
+ } else {
writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS);
wmb();
@@ -2662,8 +2668,22 @@ int t4vf_sge_init(struct adapter *adapter)
* give it more Free List entries. (Note that the SGE's Egress
* Congestion Threshold is in units of 2 Free List pointers.)
*/
- s->fl_starve_thres
- = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+ switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+ case CHELSIO_T4:
+ s->fl_starve_thres =
+ EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+ break;
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
/*
* Set up tasklet timers.
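The switch above exists because the congestion-control threshold moved to differently sized fields across chip generations; whichever field supplies it, the final `* 2 + 1` converts from the SGE's units of 2 free-list pointers to entries. A standalone check using the T6 field macros added earlier in this patch, with a hypothetical register value:

#include <stdio.h>

#define T6_EGRTHRESHOLDPACKING_S 16
#define T6_EGRTHRESHOLDPACKING_M 0xffU
#define T6_EGRTHRESHOLDPACKING_G(x) \
        (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)

int main(void)
{
        unsigned int cc = 0x00200000;   /* hypothetical congestion-control reg */
        unsigned int thres = T6_EGRTHRESHOLDPACKING_G(cc);  /* field = 32 */

        /* The register counts in units of 2 free-list pointers */
        printf("starve below %u entries\n", thres * 2 + 1); /* 65 */
        return 0;
}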
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 0db6dc9e9ed2..63dd5fdac5b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -619,7 +619,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
*/
whoami = t4_read_reg(adapter,
T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
- pf = SOURCEPF_G(whoami);
+ pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f3f1601a76f3..f44a39c40642 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
- ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
if (rxcoal->use_adaptive_rx_coalesce)
ecmd->use_adaptive_rx_coalesce = 1;
@@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev,
return 0;
}
+static int enic_coalesce_valid(struct enic *enic,
+ struct ethtool_coalesce *ec)
+{
+ u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
+ u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
+ ec->rx_coalesce_usecs_high);
+ u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
+ ec->rx_coalesce_usecs_low);
+
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -EINVAL;
+
+ if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
+ ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
+ netdev_info(enic->netdev, "ethtool_set_coalesce: adapter supports max coalesce value of %d. Setting max value.\n",
+ coalesce_usecs_max);
+
+ if (ec->rx_coalesce_usecs_high &&
+ (rx_coalesce_usecs_high <
+ rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+ return -EINVAL;
+
+ return 0;
+}
+
static int enic_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd)
{
@@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev,
u32 rx_coalesce_usecs_high;
u32 coalesce_usecs_max;
unsigned int i, intr;
+ int ret;
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
+ ret = enic_coalesce_valid(enic, ecmd);
+ if (ret)
+ return ret;
coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
coalesce_usecs_max);
@@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev,
rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
coalesce_usecs_max);
- switch (vnic_dev_get_intr_mode(enic->vdev)) {
- case VNIC_DEV_INTR_MODE_INTX:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
- if (ecmd->use_adaptive_rx_coalesce ||
- ecmd->rx_coalesce_usecs_low ||
- ecmd->rx_coalesce_usecs_high)
- return -EINVAL;
-
- intr = enic_legacy_io_intr();
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSI:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
- if (ecmd->use_adaptive_rx_coalesce ||
- ecmd->rx_coalesce_usecs_low ||
- ecmd->rx_coalesce_usecs_high)
- return -EINVAL;
-
- vnic_intr_coalescing_timer_set(&enic->intr[0],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSIX:
- if (ecmd->rx_coalesce_usecs_high &&
- (rx_coalesce_usecs_high <
- rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
- return -EINVAL;
-
+ if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- }
-
- rxcoal->use_adaptive_rx_coalesce =
- !!ecmd->use_adaptive_rx_coalesce;
- if (!rxcoal->use_adaptive_rx_coalesce)
- enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-
- if (ecmd->rx_coalesce_usecs_high) {
- rxcoal->range_end = rx_coalesce_usecs_high;
- rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
- rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
- ENIC_AIC_LARGE_PKT_DIFF;
+ tx_coalesce_usecs);
}
- break;
- default:
- break;
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ }
+ rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
+ if (!rxcoal->use_adaptive_rx_coalesce)
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+ if (ecmd->rx_coalesce_usecs_high) {
+ rxcoal->range_end = rx_coalesce_usecs_high;
+ rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+ rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+ ENIC_AIC_LARGE_PKT_DIFF;
}
- enic->tx_coalesce_usecs = tx_coalesce_usecs;
enic->rx_coalesce_usecs = rx_coalesce_usecs;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 918a8e42139b..8f646e4e968b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1149,6 +1149,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ u32 timer = cq->tobe_rx_coal_timeval;
+
+ if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+ }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+ int index;
+ u32 timer;
+ u32 range_start;
+ u32 traffic;
+ u64 delta;
+ ktime_t now = ktime_get();
+
+ delta = ktime_us_delta(now, cq->prev_ts);
+ if (delta < ENIC_AIC_TS_BREAK)
+ return;
+ cq->prev_ts = now;
+
+ traffic = pkt_size_counter->large_pkt_bytes_cnt +
+ pkt_size_counter->small_pkt_bytes_cnt;
+ /* The table takes Mbps
+ * traffic *= 8 => bits
+ * traffic *= (10^6 / delta) => bps
+ * traffic /= 10^6 => Mbps
+ *
+ * Combining, traffic *= (8 / delta)
+ */
+
+ traffic <<= 3;
+ traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+ for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+ if (traffic < mod_table[index].rx_rate)
+ break;
+ range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+ pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+ rx_coal->small_pkt_range_start :
+ rx_coal->large_pkt_range_start;
+ timer = range_start + ((rx_coal->range_end - range_start) *
+ mod_table[index].range_percent / 100);
+ /* Damping */
+ cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+ pkt_size_counter->large_pkt_bytes_cnt = 0;
+ pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
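The comment block in enic_calc_int_moderation() above compresses the unit conversion: bytes over delta microseconds to Mbps is bytes * 8 * 10^6 / delta / 10^6, and the two 10^6 factors cancel, leaving `traffic <<= 3; traffic /= delta`. A quick numeric check with hypothetical sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical sample: 1,250,000 bytes seen over 10,000 us */
        uint64_t bytes = 1250000, delta_us = 10000;

        /* Long form: bytes -> bits -> bits/s -> Mb/s */
        uint64_t mbps_long = bytes * 8 * 1000000 / delta_us / 1000000;
        /* Shortcut from the comment: the 10^6 factors cancel */
        uint64_t mbps_short = (bytes << 3) / delta_us;

        printf("%llu == %llu Mbps\n",
               (unsigned long long)mbps_long,
               (unsigned long long)mbps_short);   /* both print 1000 */
        return 0;
}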
static int enic_poll(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1199,6 +1257,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
if (err)
rq_work_done = rq_work_to_do;
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ /* Call the function which refreshes the intr coalescing timer
+ * value based on the traffic.
+ */
+ enic_calc_int_moderation(enic, &enic->rq[0]);
if (rq_work_done < rq_work_to_do) {
@@ -1207,70 +1270,14 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
napi_complete(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
}
return rq_work_done;
}
-static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
- unsigned int intr = enic_msix_rq_intr(enic, rq->index);
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- u32 timer = cq->tobe_rx_coal_timeval;
-
- if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
- vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
- cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
- }
-}
-
-static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
- int index;
- u32 timer;
- u32 range_start;
- u32 traffic;
- u64 delta;
- ktime_t now = ktime_get();
-
- delta = ktime_us_delta(now, cq->prev_ts);
- if (delta < ENIC_AIC_TS_BREAK)
- return;
- cq->prev_ts = now;
-
- traffic = pkt_size_counter->large_pkt_bytes_cnt +
- pkt_size_counter->small_pkt_bytes_cnt;
- /* The table takes Mbps
- * traffic *= 8 => bits
- * traffic *= (10^6 / delta) => bps
- * traffic /= 10^6 => Mbps
- *
- * Combining, traffic *= (8 / delta)
- */
-
- traffic <<= 3;
- traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
-
- for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
- if (traffic < mod_table[index].rx_rate)
- break;
- range_start = (pkt_size_counter->small_pkt_bytes_cnt >
- pkt_size_counter->large_pkt_bytes_cnt << 1) ?
- rx_coal->small_pkt_range_start :
- rx_coal->large_pkt_range_start;
- timer = range_start + ((rx_coal->range_end - range_start) *
- mod_table[index].range_percent / 100);
- /* Damping */
- cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
-
- pkt_size_counter->large_pkt_bytes_cnt = 0;
- pkt_size_counter->small_pkt_bytes_cnt = 0;
-}
-
#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
@@ -1407,10 +1414,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
if (err)
work_done = work_to_do;
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- /* Call the function which refreshes
- * the intr coalescing timer value based on
- * the traffic. This is supported only in
- * the case of MSI-x mode
+ /* Call the function which refreshes the intr coalescing timer
+ * value based on the traffic.
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
@@ -1569,12 +1574,6 @@ static void enic_set_rx_coal_setting(struct enic *enic)
int index = -1;
struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
- /* If intr mode is not MSIX, do not do adaptive coalescing */
- if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
- netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
- return;
- }
-
/* 1. Read the link speed from fw
* 2. Pick the default range for the speed
* 3. Update it in enic->rx_coalesce_setting
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index d1017509b08a..f7b42483921c 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -604,19 +604,7 @@ static struct pci_driver pci_driver = {
.probe = ec_bhf_probe,
.remove = ec_bhf_remove,
};
-
-static int __init ec_bhf_init(void)
-{
- return pci_register_driver(&pci_driver);
-}
-
-static void __exit ec_bhf_exit(void)
-{
- pci_unregister_driver(&pci_driver);
-}
-
-module_init(ec_bhf_init);
-module_exit(ec_bhf_exit);
+module_pci_driver(pci_driver);
module_param(polling_frequency, long, S_IRUGO);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
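module_pci_driver() generates exactly the init/exit boilerplate the hunk above removes. Roughly what the macro expands to for this driver, paraphrased from the kernel's module_driver() helper rather than quoted:

/* Approximate expansion of module_pci_driver(pci_driver): */
static int __init pci_driver_init(void)
{
        return pci_register_driver(&pci_driver);
}
module_init(pci_driver_init);

static void __exit pci_driver_exit(void)
{
        pci_unregister_driver(&pci_driver);
}
module_exit(pci_driver_exit);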
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d12b41b3b19..0a27805cbbbd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.6.0.2"
+#define DRV_VER "10.6.0.3"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -105,6 +105,8 @@
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define CNTL_SERIAL_NUM_WORDS 8 /* Controller serial number words */
+#define CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte size of a serial number word */
#define RSS_INDIR_TABLE_LEN 128
#define RSS_HASH_KEY_LEN 40
@@ -228,6 +230,7 @@ struct be_mcc_obj {
struct be_tx_stats {
u64 tx_bytes;
u64 tx_pkts;
+ u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
ulong tx_jiffies;
@@ -275,6 +278,7 @@ struct be_rx_page_info {
struct be_rx_stats {
u64 rx_bytes;
u64 rx_pkts;
+ u64 rx_vxlan_offload_pkts;
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
@@ -590,6 +594,7 @@ struct be_adapter {
struct rss_info rss_info;
/* Filters for packets that need to be sent to BMC */
u32 bmc_filt_mask;
+ u16 serial_num[CNTL_SERIAL_NUM_WORDS];
};
#define be_physfn(adapter) (!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9eac3227d2ca..3be1fbdcdd02 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb)
return wrb->payload.embedded_payload;
}
-static void be_mcc_notify(struct be_adapter *adapter)
+static int be_mcc_notify(struct be_adapter *adapter)
{
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
if (be_check_error(adapter, BE_ERROR_ANY))
- return;
+ return -EIO;
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
wmb();
iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+ return 0;
}
/* To check if valid bit is set, check the entire word as we don't know
@@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
return;
}
+ if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return;
+ }
+
if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
opcode == OPCODE_COMMON_WRITE_OBJECT) &&
subsystem == CMD_SUBSYSTEM_COMMON) {
@@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto out;
status = be_mcc_wait_compl(adapter);
if (status == -EIO)
@@ -1547,7 +1557,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
else
hdr->version = 2;
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
adapter->stats_cmd_sent = true;
err:
@@ -1583,7 +1596,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
adapter->stats_cmd_sent = true;
err:
@@ -1687,8 +1703,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
sizeof(*req), wrb, NULL);
- be_mcc_notify(adapter);
-
+ status = be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1860,7 +1875,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1953,7 +1968,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
}
- status = be_mcc_notify_wait(adapter);
+ status = be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2320,7 +2335,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2491,7 +2509,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->params.op_code = cpu_to_le32(flash_opcode);
req->params.data_buf_size = cpu_to_le32(buf_size);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2585,7 +2606,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
- goto err;
+ goto err_unlock;
}
req = embedded_payload(wrb);
@@ -2599,8 +2620,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req->loopback_type = loopback_type;
req->loopback_state = enable;
- status = be_mcc_notify_wait(adapter);
-err:
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+ status = -ETIMEDOUT;
+
+ return status;
+
+err_unlock:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2636,7 +2668,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req->num_pkts = cpu_to_le32(num_pkts);
req->loopback_type = cpu_to_le32(loopback_type);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
spin_unlock_bh(&adapter->mcc_lock);
@@ -2818,10 +2852,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
struct be_cmd_req_cntl_attribs *req;
struct be_cmd_resp_cntl_attribs *resp;
- int status;
+ int status, i;
int payload_len = max(sizeof(*req), sizeof(*resp));
struct mgmt_controller_attrib *attribs;
struct be_dma_mem attribs_cmd;
+ u32 *serial_num;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -2852,6 +2887,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
if (!status) {
attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ serial_num = attribs->hba_attribs.controller_serial_number;
+ for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+ adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+ (BIT_MASK(16) - 1);
}
err:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 00e3a6b6b822..7d178bdb112e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1500,6 +1500,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
#define BE_PME_D3COLD_CAP 0x80
/********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT 12000
+
struct be_cmd_req_loopback_test {
struct be_cmd_req_hdr hdr;
u32 loopback_type;
@@ -1640,10 +1642,12 @@ struct be_cmd_req_set_qos {
struct mgmt_hba_attribs {
u32 rsvd0[24];
u8 controller_model_number[32];
- u32 rsvd1[79];
- u8 rsvd2[3];
+ u32 rsvd1[16];
+ u32 controller_serial_number[8];
+ u32 rsvd2[55];
+ u8 rsvd3[3];
u8 phy_port;
- u32 rsvd3[13];
+ u32 rsvd4[13];
} __packed;
struct mgmt_controller_attrib {
@@ -1763,6 +1767,7 @@ struct be_cmd_req_set_mac_list {
/*********************** HSW Config ***********************/
#define PORT_FWD_TYPE_VEPA 0x3
#define PORT_FWD_TYPE_VEB 0x2
+#define PORT_FWD_TYPE_PASSTHRU 0x1
#define ENABLE_MAC_SPOOFCHK 0x2
#define DISABLE_MAC_SPOOFCHK 0x3
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b2476dbfd103..2c9ed1710ba6 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -138,6 +138,7 @@ static const struct be_ethtool_stat et_stats[] = {
static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
{DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_internal_parity_err)},
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
+ {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
/* Number of skbs queued for transmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
/* Number of times the TX queue was stopped due to lack
@@ -847,10 +849,21 @@ err:
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
+ int ret;
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ loopback_type, 1);
+ if (ret)
+ return ret;
+
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
loopback_type, 1500, 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ BE_NO_LOOPBACK, 1);
+ if (ret)
+ return ret;
+
return *status;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c28e3bfdccd7..15cc3a1f12ff 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -681,11 +681,14 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
struct be_tx_stats *stats = tx_stats(txo);
+ u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
u64_stats_update_begin(&stats->sync);
stats->tx_reqs++;
stats->tx_bytes += skb->len;
- stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+ stats->tx_pkts += tx_pkts;
+ if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+ stats->tx_vxlan_offload_pkts += tx_pkts;
u64_stats_update_end(&stats->sync);
}
@@ -1258,7 +1261,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
if (is_udp_pkt((*skb))) {
struct udphdr *udp = udp_hdr((*skb));
- switch (udp->dest) {
+ switch (ntohs(udp->dest)) {
case DHCP_CLIENT_PORT:
os2bmc = is_dhcp_client_filt_enabled(adapter);
goto done;
@@ -1961,6 +1964,8 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_compl++;
stats->rx_bytes += rxcp->pkt_size;
stats->rx_pkts++;
+ if (rxcp->tunneled)
+ stats->rx_vxlan_offload_pkts++;
if (rxcp->pkt_type == BE_MULTICAST_PACKET)
stats->rx_mcast_pkts++;
if (rxcp->err)
@@ -3610,15 +3615,15 @@ err:
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
+ struct device *dev = &adapter->pdev->dev;
struct be_dma_mem cmd;
- int status = 0;
u8 mac[ETH_ALEN];
+ int status;
eth_zero_addr(mac);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -3627,24 +3632,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
PCICFG_PM_CONTROL_OFFSET,
PCICFG_PM_CONTROL_MASK);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan\n");
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
- cmd.dma);
- return status;
+ dev_err(dev, "Could not enable Wake-on-lan\n");
+ goto err;
}
- status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr,
- &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
}
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -4977,7 +4976,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
{
if (!fhdr) {
dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
- return -1;
+ return false;
}
/* First letter of the build version is used to identify
@@ -5132,9 +5131,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int status = 0;
u8 hsw_mode;
- if (!sriov_enabled(adapter))
- return 0;
-
/* BE and Lancer chips support VEB mode only */
if (BEx_chip(adapter) || lancer_chip(adapter)) {
hsw_mode = PORT_FWD_TYPE_VEB;
@@ -5144,6 +5140,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
NULL);
if (status)
return 0;
+
+ if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+ return 0;
}
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@ -5278,6 +5277,27 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
}
#endif
+static int be_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 *id;
+
+ if (MAX_PHYS_ITEM_ID_LEN < id_len)
+ return -ENOSPC;
+
+ ppid->id[0] = adapter->hba_port_num + 1;
+ id = &ppid->id[1];
+ for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+ i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+ memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+ ppid->id_len = id_len;
+
+ return 0;
+}
+
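be_get_phys_port_id() builds the opaque ID from the port number plus the controller serial words copied highest-index first. A standalone model of the layout (the serial values here are made up, and each word is copied in host representation, as in the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_WORDS 8   /* mirrors CNTL_SERIAL_NUM_WORDS */

int main(void)
{
        uint16_t serial[NUM_WORDS] = { 0x1111, 0x2222, 0x3333, 0x4444,
                                       0x5555, 0x6666, 0x7777, 0x8888 };
        uint8_t id[1 + NUM_WORDS * sizeof(uint16_t)];
        uint8_t *p = &id[1];
        int i;

        id[0] = 2;   /* hba_port_num + 1, hypothetical port 1 */
        /* Serial words are copied highest-index first, as in the hunk above */
        for (i = NUM_WORDS - 1; i >= 0; i--, p += sizeof(uint16_t))
                memcpy(p, &serial[i], sizeof(uint16_t));

        for (i = 0; i < (int)sizeof(id); i++)
                printf("%02x", id[i]);
        printf("\n");
        return 0;
}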
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -5308,6 +5328,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_del_vxlan_port = be_del_vxlan_port,
.ndo_features_check = be_features_check,
#endif
+ .ndo_get_phys_port_id = be_get_phys_port_id,
};
static void be_netdev_init(struct net_device *netdev)
@@ -5866,7 +5887,6 @@ static int be_pci_resume(struct pci_dev *pdev)
if (status)
return status;
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
status = be_resume(adapter);
@@ -5946,7 +5966,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
pci_set_master(pdev);
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 271bb5862346..787da8e54e99 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -364,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
-static int
+static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
struct net_device *ndev)
@@ -439,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bdp->cbd_sc = status;
}
- txq->cur_tx = bdp;
-
- return 0;
-
+ return bdp;
dma_mapping_error:
bdp = txq->cur_tx;
for (i = 0; i < frag; i++) {
@@ -450,7 +447,7 @@ dma_mapping_error:
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE);
}
- return NETDEV_TX_OK;
+ return ERR_PTR(-ENOMEM);
}
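Returning either a valid bufdesc pointer or an encoded errno through the same return value is the kernel's ERR_PTR()/IS_ERR() idiom, which the caller in the next hunk tests with IS_ERR(). A minimal standalone model of the encoding (the kernel's real macros live in linux/err.h; this is a paraphrase):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Errno values live in the top page of the address space, so they
 * cannot collide with valid pointers. */
#define MAX_ERRNO 4095
static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *alloc_desc(int fail)
{
        static int slot;
        return fail ? err_ptr(-ENOMEM) : &slot;
}

int main(void)
{
        void *d = alloc_desc(1);

        if (is_err(d))
                printf("alloc failed: %ld\n", ptr_err(d));   /* -12 */
        return 0;
}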
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
@@ -467,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
unsigned int estatus = 0;
unsigned int index;
int entries_free;
- int ret;
entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -485,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
/* Fill in a Tx ring entry */
bdp = txq->cur_tx;
+ last_bdp = bdp;
status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
@@ -513,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
}
if (nr_frags) {
- ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
- if (ret)
- return ret;
+ last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+ if (IS_ERR(last_bdp))
+ return NETDEV_TX_OK;
} else {
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
if (fep->bufdesc_ex) {
@@ -544,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = estatus;
}
- last_bdp = txq->cur_tx;
index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */
txq->tx_skbuff[index] = skb;
@@ -563,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
skb_tx_timestamp(skb);
+	/* Make sure the updates to bdp and tx_skbuff are performed before
+	 * cur_tx is advanced.
+ */
+ wmb();
txq->cur_tx = bdp;
/* Trigger transmission start */
@@ -1218,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* get next bdp of dirty_tx */
bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
- while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-
- /* current queue is empty */
- if (bdp == txq->cur_tx)
+ while (bdp != READ_ONCE(txq->cur_tx)) {
+ /* Order the load of cur_tx and cbd_sc */
+ rmb();
+ status = READ_ONCE(bdp->cbd_sc);
+ if (status & BD_ENET_TX_READY)
break;
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1275,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
+	/* Make sure the updates to bdp and tx_skbuff are performed
+	 * before dirty_tx is advanced.
+ */
+ wmb();
txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
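Taken together, the xmit and completion hunks form a store/load barrier pairing; condensed to the ordering-relevant lines (a sketch, not the full code):

	/* producer: fec_enet_txq_submit_skb() */
	txq->tx_skbuff[index] = skb;	/* descriptor + bookkeeping writes */
	wmb();				/* publish them before cur_tx */
	txq->cur_tx = bdp;

	/* consumer: fec_enet_tx_queue() */
	while (bdp != READ_ONCE(txq->cur_tx)) {
		rmb();			/* pairs with the producer's wmb() */
		status = READ_ONCE(bdp->cbd_sc);
		/* reclaim the descriptor */
	}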
@@ -1774,7 +1779,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int ret = 0;
ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
fep->mii_timeout = 0;
@@ -1813,7 +1818,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
int ret = 0;
ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
fep->mii_timeout = 0;
@@ -2865,7 +2870,7 @@ fec_enet_open(struct net_device *ndev)
int ret;
ret = pm_runtime_get_sync(&fep->pdev->dev);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
pinctrl_pm_select_default_state(&fep->pdev->dev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f457a23d0bfb..1543cf0e8ef6 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
break;
default:
- /*
- * register RXMTRL must be set in order to do V1 packets,
- * therefore it is not possible to time stamp both V1 Sync and
- * Delay_Req messages and hardware does not support
- * timestamping all packets => return error
- */
fep->hwts_rx_en = 1;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2b7610f341b0..087ffcdc48a3 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -109,15 +109,15 @@
#define TX_TIMEOUT (1*HZ)
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
- dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+ int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
bdp->lstatus = cpu_to_be32(lstatus);
}
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
- struct rxbd8 *rxbdp;
u32 __iomem *rfbptr;
int i, j;
- dma_addr_t bufaddr;
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev)
rfbptr = &regs->rfbptr0;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->cur_rx = rx_queue->rx_bd_base;
- rx_queue->skb_currx = 0;
- rxbdp = rx_queue->rx_bd_base;
- for (j = 0; j < rx_queue->rx_ring_size; j++) {
- struct sk_buff *skb = rx_queue->rx_skbuff[j];
+ rx_queue->next_to_clean = 0;
+ rx_queue->next_to_use = 0;
+ rx_queue->next_to_alloc = 0;
- if (skb) {
- bufaddr = be32_to_cpu(rxbdp->bufPtr);
- } else {
- skb = gfar_new_skb(ndev, &bufaddr);
- if (!skb) {
- netdev_err(ndev, "Can't allocate RX buffers\n");
- return -ENOMEM;
- }
- rx_queue->rx_skbuff[j] = skb;
- }
-
- gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
- rxbdp++;
- }
+	/* Make sure next_to_clean != next_to_use after this,
+	 * by leaving at least one unused descriptor
+ */
+ gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
rx_queue->rfbptr = rfbptr;
rfbptr += 2;
}
-
- return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
void *vaddr;
dma_addr_t addr;
- int i, j, k;
+ int i, j;
struct gfar_private *priv = netdev_priv(ndev);
struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue = priv->rx_queue[i];
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
- rx_queue->dev = ndev;
+ rx_queue->ndev = ndev;
+ rx_queue->dev = dev;
addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
@@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
if (!tx_queue->tx_skbuff)
goto cleanup;
- for (k = 0; k < tx_queue->tx_ring_size; k++)
- tx_queue->tx_skbuff[k] = NULL;
+ for (j = 0; j < tx_queue->tx_ring_size; j++)
+ tx_queue->tx_skbuff[j] = NULL;
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_skbuff =
- kmalloc_array(rx_queue->rx_ring_size,
- sizeof(*rx_queue->rx_skbuff),
- GFP_KERNEL);
- if (!rx_queue->rx_skbuff)
+ rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_buff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_buff)
goto cleanup;
-
- for (j = 0; j < rx_queue->rx_ring_size; j++)
- rx_queue->rx_skbuff[j] = NULL;
}
- if (gfar_init_bds(ndev))
- goto cleanup;
+ gfar_init_bds(ndev);
return 0;
@@ -354,10 +333,8 @@ static void gfar_init_rqprm(struct gfar_private *priv)
}
}
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
{
- int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
@@ -366,16 +343,6 @@ static void gfar_rx_buff_size_config(struct gfar_private *priv)
if (priv->hwts_rx_en)
priv->uses_rxfcb = 1;
-
- if (priv->uses_rxfcb)
- frame_size += GMAC_FCB_LEN;
-
- frame_size += priv->padding;
-
- frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
-
- priv->rx_buffer_size = frame_size;
}
static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -593,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
if (!priv->rx_queue[i])
return -ENOMEM;
- priv->rx_queue[i]->rx_skbuff = NULL;
priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = priv->ndev;
+ priv->rx_queue[i]->ndev = priv->ndev;
}
return 0;
}
@@ -1187,12 +1153,11 @@ void gfar_mac_reset(struct gfar_private *priv)
udelay(3);
- /* Compute rx_buff_size based on config flags */
- gfar_rx_buff_size_config(priv);
+ gfar_rx_offload_en(priv);
/* Initialize the max receive frame/buffer lengths */
- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
+ gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+ gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
/* Initialize the Minimum Frame Length Register */
gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1200,12 +1165,11 @@ void gfar_mac_reset(struct gfar_private *priv)
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
- /* If the mtu is larger than the max size for standard
- * ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length
+	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+	 * are marked as truncated. Avoid this by setting
+	 * MACCFG2[Huge Frame]=1, checking RxBD[LG], and discarding
+	 * frames larger than MAXFRM.
*/
- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
+ if (gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
gfar_write(&regs->maccfg2, tempval);
@@ -1415,8 +1379,6 @@ static int gfar_probe(struct platform_device *ofdev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
- priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1599,10 +1561,7 @@ static int gfar_restore(struct device *dev)
return 0;
}
- if (gfar_init_bds(ndev)) {
- free_skb_resources(priv);
- return -ENOMEM;
- }
+ gfar_init_bds(ndev);
gfar_mac_reset(priv);
@@ -1893,26 +1852,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
- struct rxbd8 *rxbdp;
- struct gfar_private *priv = netdev_priv(rx_queue->dev);
int i;
- rxbdp = rx_queue->rx_bd_base;
+ struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+ if (rx_queue->skb)
+ dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) {
- if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
- priv->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
- rx_queue->rx_skbuff[i] = NULL;
- }
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
rxbdp->lstatus = 0;
rxbdp->bufPtr = 0;
rxbdp++;
+
+ if (!rxb->page)
+ continue;
+
+ dma_unmap_single(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+
+ rxb->page = NULL;
}
- kfree(rx_queue->rx_skbuff);
- rx_queue->rx_skbuff = NULL;
+
+ kfree(rx_queue->rx_buff);
+ rx_queue->rx_buff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
@@ -1937,7 +1902,7 @@ static void free_skb_resources(struct gfar_private *priv)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if (rx_queue->rx_skbuff)
+ if (rx_queue->rx_buff)
free_skb_rx_queue(rx_queue);
}
@@ -2495,7 +2460,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
struct gfar_private *priv = netdev_priv(dev);
int frame_size = new_mtu + ETH_HLEN;
- if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
@@ -2549,15 +2514,6 @@ static void gfar_timeout(struct net_device *dev)
schedule_work(&priv->reset_task);
}
-static void gfar_align_skb(struct sk_buff *skb)
-{
- /* We need the data buffer to be aligned properly. We will reserve
- * as many bytes as needed to align the data properly
- */
- skb_reserve(skb, RXBUF_ALIGNMENT -
- (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
@@ -2615,7 +2571,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
- u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+ u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+ ~0x7UL);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@ -2664,49 +2621,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb;
+ struct page *page;
+ dma_addr_t addr;
- skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
- if (!skb)
- return NULL;
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
- gfar_align_skb(skb);
+ addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+ __free_page(page);
- return skb;
+ return false;
+ }
+
+ rxb->dma = addr;
+ rxb->page = page;
+ rxb->page_offset = 0;
+
+ return true;
}
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb;
- dma_addr_t addr;
+ struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+ struct gfar_extra_stats *estats = &priv->extra_stats;
- skb = gfar_alloc_skb(dev);
- if (!skb)
- return NULL;
+ netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+ atomic64_inc(&estats->rx_alloc_err);
+}
- addr = dma_map_single(priv->dev, skb->data,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, addr))) {
- dev_kfree_skb_any(skb);
- return NULL;
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+ int alloc_cnt)
+{
+ struct rxbd8 *bdp;
+ struct gfar_rx_buff *rxb;
+ int i;
+
+ i = rx_queue->next_to_use;
+ bdp = &rx_queue->rx_bd_base[i];
+ rxb = &rx_queue->rx_buff[i];
+
+ while (alloc_cnt--) {
+ /* try reuse page */
+ if (unlikely(!rxb->page)) {
+ if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+ gfar_rx_alloc_err(rx_queue);
+ break;
+ }
+ }
+
+ /* Setup the new RxBD */
+ gfar_init_rxbdp(rx_queue, bdp,
+ rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+ /* Update to the next pointer */
+ bdp++;
+ rxb++;
+
+ if (unlikely(++i == rx_queue->rx_ring_size)) {
+ i = 0;
+ bdp = rx_queue->rx_bd_base;
+ rxb = rx_queue->rx_buff;
+ }
}
- *bufaddr = addr;
- return skb;
+ rx_queue->next_to_use = i;
+ rx_queue->next_to_alloc = i;
}
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *ndev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
/* If the packet was truncated, none of the other errors matter */
- if (status & RXBD_TRUNCATED) {
+ if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
stats->rx_length_errors++;
atomic64_inc(&estats->rx_trunc);
@@ -2714,25 +2707,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
return;
}
/* Count the errors, if there were any */
- if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
stats->rx_length_errors++;
- if (status & RXBD_LARGE)
+ if (lstatus & BD_LFLAG(RXBD_LARGE))
atomic64_inc(&estats->rx_large);
else
atomic64_inc(&estats->rx_short);
}
- if (status & RXBD_NONOCTET) {
+ if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
stats->rx_frame_errors++;
atomic64_inc(&estats->rx_nonoctet);
}
- if (status & RXBD_CRCERR) {
+ if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
atomic64_inc(&estats->rx_crcerr);
stats->rx_crc_errors++;
}
- if (status & RXBD_OVERRUN) {
+ if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
atomic64_inc(&estats->rx_overrun);
- stats->rx_crc_errors++;
+ stats->rx_over_errors++;
}
}
@@ -2783,6 +2776,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
return IRQ_HANDLED;
}
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+ struct sk_buff *skb, bool first)
+{
+ unsigned int size = lstatus & BD_LENGTH_MASK;
+ struct page *page = rxb->page;
+
+ /* Remove the FCS from the packet length */
+ if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+ size -= ETH_FCS_LEN;
+
+ if (likely(first))
+ skb_put(skb, size);
+ else
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+
+ /* try reuse page */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* change offset to the other half */
+ rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+ atomic_inc(&page->_count);
+
+ return true;
+}
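The reuse test above relies on each page being split into two GFAR_RXB_TRUESIZE halves; a sketch of the assumed layout (PAGE_SIZE 4096 and the GFAR_* sizes from gianfar.h further down):

/*        offset 0                      offset 2048
 *        +-----------------------------+-----------------------------+
 *        | 64B align pad | rx data     | 64B align pad | rx data     |
 *        +-----------------------------+-----------------------------+
 *
 * page_count == 1: only the ring holds the page, so flip page_offset
 *                  and take a reference to hand out the other half.
 * page_count  > 1: the stack still owns a half; give the page up.
 */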
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+ struct gfar_rx_buff *old_rxb)
+{
+ struct gfar_rx_buff *new_rxb;
+ u16 nta = rxq->next_to_alloc;
+
+ new_rxb = &rxq->rx_buff[nta];
+
+ /* find next buf that can reuse a page */
+ nta++;
+ rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+ /* copy page reference */
+ *new_rxb = *old_rxb;
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+ old_rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+ u32 lstatus, struct sk_buff *skb)
+{
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+ struct page *page = rxb->page;
+ bool first = false;
+
+ if (likely(!skb)) {
+ void *buff_addr = page_address(page) + rxb->page_offset;
+
+ skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+ if (unlikely(!skb)) {
+ gfar_rx_alloc_err(rx_queue);
+ return NULL;
+ }
+ skb_reserve(skb, RXBUF_ALIGNMENT);
+ first = true;
+ }
+
+ dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+ if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+ /* reuse the free half of the page */
+ gfar_reuse_rx_page(rx_queue, rxb);
+ } else {
+ /* page cannot be reused, unmap it */
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear rxb content */
+ rxb->page = NULL;
+
+ return skb;
+}
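For the first buffer descriptor of a frame the skb head is built in place over the page half; later descriptors are attached as page fragments without copying. Condensed from the function above:

	/* first BD: head built over the half, hw data starts past the pad */
	skb = build_skb(page_address(page) + rxb->page_offset,
			GFAR_SKBFRAG_SIZE);
	skb_reserve(skb, RXBUF_ALIGNMENT);

	/* continuation BD: zero-copy attach of the page half */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rxb->page_offset + RXBUF_ALIGNMENT, size,
			GFAR_RXB_TRUESIZE);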
+
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
@@ -2797,10 +2877,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
}
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(ndev);
struct rxfcb *fcb = NULL;
/* fcb is at the beginning if exists */
@@ -2809,10 +2888,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Remove the FCB from the skb
* Remove the padded bytes, if there are any
*/
- if (amount_pull) {
- skb_record_rx_queue(skb, fcb->rq);
- skb_pull(skb, amount_pull);
- }
+ if (priv->uses_rxfcb)
+ skb_pull(skb, GMAC_FCB_LEN);
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
@@ -2826,24 +2903,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->padding)
skb_pull(skb, priv->padding);
- if (dev->features & NETIF_F_RXCSUM)
+ if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
/* Tell the skb what kind of packet this is */
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
/* There is a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
be16_to_cpu(fcb->flags) & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(fcb->vlctl));
-
- /* Send the packet up the stack */
- napi_gro_receive(napi, skb);
-
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2852,91 +2925,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
- struct net_device *dev = rx_queue->dev;
- struct rxbd8 *bdp, *base;
- struct sk_buff *skb;
- int pkt_len;
- int amount_pull;
- int howmany = 0;
- struct gfar_private *priv = netdev_priv(dev);
+ struct net_device *ndev = rx_queue->ndev;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct rxbd8 *bdp;
+ int i, howmany = 0;
+ struct sk_buff *skb = rx_queue->skb;
+ int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+ unsigned int total_bytes = 0, total_pkts = 0;
/* Get the first full descriptor */
- bdp = rx_queue->cur_rx;
- base = rx_queue->rx_bd_base;
+ i = rx_queue->next_to_clean;
- amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+ while (rx_work_limit--) {
+ u32 lstatus;
+
+ if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+ cleaned_cnt = 0;
+ }
- while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
- struct sk_buff *newskb;
- dma_addr_t bufaddr;
+ bdp = &rx_queue->rx_bd_base[i];
+ lstatus = be32_to_cpu(bdp->lstatus);
+ if (lstatus & BD_LFLAG(RXBD_EMPTY))
+ break;
+ /* order rx buffer descriptor reads */
rmb();
- /* Add another skb for the future */
- newskb = gfar_new_skb(dev, &bufaddr);
+ /* fetch next to clean buffer from the ring */
+ skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+ if (unlikely(!skb))
+ break;
- skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+ cleaned_cnt++;
+ howmany++;
- dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
- priv->rx_buffer_size, DMA_FROM_DEVICE);
-
- if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
- be16_to_cpu(bdp->length) > priv->rx_buffer_size))
- bdp->status = cpu_to_be16(RXBD_LARGE);
-
- /* We drop the frame if we failed to allocate a new buffer */
- if (unlikely(!newskb ||
- !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
- be16_to_cpu(bdp->status) & RXBD_ERR)) {
- count_errors(be16_to_cpu(bdp->status), dev);
-
- if (unlikely(!newskb)) {
- newskb = skb;
- bufaddr = be32_to_cpu(bdp->bufPtr);
- } else if (skb)
- dev_kfree_skb(skb);
- } else {
- /* Increment the number of packets */
- rx_queue->stats.rx_packets++;
- howmany++;
-
- if (likely(skb)) {
- pkt_len = be16_to_cpu(bdp->length) -
- ETH_FCS_LEN;
- /* Remove the FCS from the packet length */
- skb_put(skb, pkt_len);
- rx_queue->stats.rx_bytes += pkt_len;
- skb_record_rx_queue(skb, rx_queue->qindex);
- gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi_rx);
+ if (unlikely(++i == rx_queue->rx_ring_size))
+ i = 0;
- } else {
- netif_warn(priv, rx_err, dev, "Missing skb!\n");
- rx_queue->stats.rx_dropped++;
- atomic64_inc(&priv->extra_stats.rx_skbmissing);
- }
+ rx_queue->next_to_clean = i;
+
+ /* fetch next buffer if not the last in frame */
+ if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+ continue;
+
+ if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+ count_errors(lstatus, ndev);
+ /* discard faulty buffer */
+ dev_kfree_skb(skb);
+ skb = NULL;
+ rx_queue->stats.rx_dropped++;
+ continue;
}
- rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+ /* Increment the number of packets */
+ total_pkts++;
+ total_bytes += skb->len;
- /* Setup the new bdp */
- gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+ skb_record_rx_queue(skb, rx_queue->qindex);
- /* Update Last Free RxBD pointer for LFC */
- if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
- gfar_write(rx_queue->rfbptr, (u32)bdp);
+ gfar_process_frame(ndev, skb);
- /* Update to the next pointer */
- bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+ /* Send the packet up the stack */
+ napi_gro_receive(&rx_queue->grp->napi_rx, skb);
- /* update to point at the next skb */
- rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
- RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ skb = NULL;
}
- /* Update the current rxbd pointer to be the next one */
- rx_queue->cur_rx = bdp;
+	/* Store the incomplete frame for completion on the next poll */
+ rx_queue->skb = skb;
+
+ rx_queue->stats.rx_packets += total_pkts;
+ rx_queue->stats.rx_bytes += total_bytes;
+
+ if (cleaned_cnt)
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+ /* Update Last Free RxBD pointer for LFC */
+ if (unlikely(priv->tx_actual_en)) {
+ u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+ gfar_write(rx_queue->rfbptr, bdp_dma);
+ }
return howmany;
}
@@ -3454,7 +3525,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
struct phy_device *phydev = priv->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
- struct rxbd8 *bdp;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
return;
@@ -3511,15 +3581,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
/* Turn last free buffer recording on */
if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
for (i = 0; i < priv->num_rx_queues; i++) {
+ u32 bdp_dma;
+
rx_queue = priv->rx_queue[i];
- bdp = rx_queue->cur_rx;
- /* skip to previous bd */
- bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
- rx_queue->rx_bd_base,
- rx_queue->rx_ring_size);
-
- if (rx_queue->rfbptr)
- gfar_write(rx_queue->rfbptr, (u32)bdp);
+ bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+ gfar_write(rx_queue->rfbptr, bdp_dma);
}
priv->tx_actual_en = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 5545e4103368..8c1994856e93 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -71,11 +71,6 @@ struct ethtool_rx_list {
/* Number of bytes to align the rx bufs to */
#define RXBUF_ALIGNMENT 64
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers. ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
#define PHY_INIT_TIMEOUT 100000
#define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_TX_RING_SIZE 256
#define DEFAULT_RX_RING_SIZE 256
+#define GFAR_RX_BUFF_ALLOC 16
+
#define GFAR_RX_MAX_RING_SIZE 256
#define GFAR_TX_MAX_RING_SIZE 256
@@ -103,11 +100,14 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-#define DEFAULT_RX_BUFFER_SIZE 1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
#define DEFAULT_FIFO_TX_THR 0x100
#define DEFAULT_FIFO_TX_STARVE 0x40
@@ -640,6 +640,7 @@ struct rmon_mib
};
struct gfar_extra_stats {
+ atomic64_t rx_alloc_err;
atomic64_t rx_large;
atomic64_t rx_short;
atomic64_t rx_nonoctet;
@@ -651,7 +652,6 @@ struct gfar_extra_stats {
atomic64_t eberr;
atomic64_t tx_babt;
atomic64_t tx_underrun;
- atomic64_t rx_skbmissing;
atomic64_t tx_timeout;
};
@@ -1012,34 +1012,42 @@ struct rx_q_stats {
unsigned long rx_dropped;
};
+struct gfar_rx_buff {
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+};
+
/**
* struct gfar_priv_rx_q - per rx queue structure
- * @rx_skbuff: skb pointers
- * @skb_currx: currently use skb pointer
+ * @rx_buff: Array of buffer info metadata structs
* @rx_bd_base: First rx buffer descriptor
- * @cur_rx: Next free rx ring entry
+ * @next_to_use: index of the next buffer to be alloc'd
+ * @next_to_clean: index of the next buffer to be cleaned
* @qindex: index of this queue
- * @dev: back pointer to the dev structure
+ * @ndev: back pointer to net_device
* @rx_ring_size: Rx ring size
* @rxcoalescing: enable/disable rx-coalescing
* @rxic: receive interrupt coalescing value
*/
struct gfar_priv_rx_q {
- struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
- dma_addr_t rx_bd_dma_base;
+ struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
struct rxbd8 *rx_bd_base;
- struct rxbd8 *cur_rx;
- struct net_device *dev;
- struct gfar_priv_grp *grp;
+ struct net_device *ndev;
+ struct device *dev;
+ u16 rx_ring_size;
+ u16 qindex;
+ struct gfar_priv_grp *grp;
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+ struct sk_buff *skb;
struct rx_q_stats stats;
- u16 skb_currx;
- u16 qindex;
- unsigned int rx_ring_size;
- /* RX Coalescing values */
+ u32 __iomem *rfbptr;
unsigned char rxcoalescing;
unsigned long rxic;
- u32 __iomem *rfbptr;
+ dma_addr_t rx_bd_dma_base;
};
enum gfar_irqinfo_id {
@@ -1109,7 +1117,6 @@ struct gfar_private {
struct device *dev;
struct net_device *ndev;
enum gfar_errata errata;
- unsigned int rx_buffer_size;
u16 uses_rxfcb;
u16 padding;
@@ -1292,6 +1299,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
bdp->lstatus = cpu_to_be32(lstatus);
}
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+ if (rxq->next_to_clean > rxq->next_to_use)
+ return rxq->next_to_clean - rxq->next_to_use - 1;
+
+ return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
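A worked example of the helper above with rx_ring_size == 8; the trailing "- 1" keeps one descriptor permanently unused so that next_to_clean == next_to_use can only mean an empty ring:

/*   next_to_clean = 6, next_to_use = 2:   6 - 2 - 1     = 3 unused
 *   next_to_clean = 2, next_to_use = 6:   8 + 2 - 6 - 1 = 3 unused
 *   next_to_clean = next_to_use:          8 + 0 - 0 - 1 = 7 unused (empty)
 */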
+
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+ struct rxbd8 *bdp;
+ u32 bdp_dma;
+ int i;
+
+ i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+ bdp = &rxq->rx_bd_base[i];
+ bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+ bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+ return bdp_dma;
+}
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 5b90fcf96265..6bdc89179b72 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+ /* extra stats */
+ "rx-allocation-errors",
"rx-large-frame-errors",
"rx-short-frame-errors",
"rx-non-octet-errors",
@@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"ethernet-bus-error",
"tx-babbling-errors",
"tx-underrun-errors",
- "rx-skb-missing-errors",
"tx-timeout-errors",
+ /* rmon stats */
"tx-rx-64-frames",
"tx-rx-65-127-frames",
"tx-rx-128-255-frames",
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d49bee38cd31..cc2d8b4b18e3 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = {
.remove = hip04_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = hip04_mac_match,
},
};
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
index b3bac25db99c..fca0a5be1f0f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -174,7 +174,6 @@ static struct platform_driver hip04_mdio_driver = {
.remove = hip04_mdio_remove,
.driver = {
.name = "hip04-mdio",
- .owner = THIS_MODULE,
.of_match_table = hip04_mdio_match,
},
};
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 29bbb628d712..7af870a3c549 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+static bool old_large_send __read_mostly;
+module_param(old_large_send, bool, S_IRUGO);
+MODULE_PARM_DESC(old_large_send,
+ "Use old large send method on firmware that supports the new method");
+
struct ibmveth_stat {
char name[ETH_GSTRING_LEN];
int offset;
@@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = {
{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
- { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
+ { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
+ { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};
/* simple methods of getting data from the current rxq entry */
@@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
return rc1 ? rc1 : rc2;
}
+static int ibmveth_set_tso(struct net_device *dev, u32 data)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ unsigned long set_attr, clr_attr, ret_attr;
+ long ret1, ret2;
+ int rc1 = 0, rc2 = 0;
+ int restart = 0;
+
+ if (netif_running(dev)) {
+ restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(dev);
+ adapter->pool_config = 0;
+ }
+
+ set_attr = 0;
+ clr_attr = 0;
+
+ if (data)
+ set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+ else
+ clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+
+ ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+ !old_large_send) {
+ ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ set_attr, &ret_attr);
+
+ if (ret2 != H_SUCCESS) {
+ netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
+ data, ret2);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ rc1 = -EIO;
+
+ } else {
+ adapter->fw_large_send_support = data;
+ adapter->large_send = data;
+ }
+ } else {
+		/* Older firmware versions of the large send offload do not
+		 * support TCP over IPv6
+ */
+ if (data == 1) {
+ dev->features &= ~NETIF_F_TSO6;
+ netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+ }
+ adapter->large_send = data;
+ }
+
+ if (restart)
+ rc2 = ibmveth_open(dev);
+
+ return rc1 ? rc1 : rc2;
+}
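Note the rollback in the error path above: h_illan_attributes() takes a clear-mask and a set-mask, so undoing a failed toggle is the same call with the two masks swapped. Condensed (addr standing in for adapter->vdev->unit_address):

	ret = h_illan_attributes(addr, clr_attr, set_attr, &ret_attr);
	if (ret != H_SUCCESS)
		/* restore the previous state: swap the masks */
		h_illan_attributes(addr, set_attr, clr_attr, &ret_attr);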
+
static int ibmveth_set_features(struct net_device *dev,
netdev_features_t features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
- int rc;
- netdev_features_t changed = features ^ dev->features;
-
- if (features & NETIF_F_TSO & changed)
- netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+ int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
+ int rc1 = 0, rc2 = 0;
- if (rx_csum == adapter->rx_csum)
- return 0;
+ if (rx_csum != adapter->rx_csum) {
+ rc1 = ibmveth_set_csum_offload(dev, rx_csum);
+ if (rc1 && !adapter->rx_csum)
+ dev->features =
+ features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ }
- rc = ibmveth_set_csum_offload(dev, rx_csum);
- if (rc && !adapter->rx_csum)
- dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ if (large_send != adapter->large_send) {
+ rc2 = ibmveth_set_tso(dev, large_send);
+ if (rc2 && !adapter->large_send)
+ dev->features =
+ features & ~(NETIF_F_TSO | NETIF_F_TSO6);
+ }
- return rc;
+ return rc1 ? rc1 : rc2;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs)
+ union ibmveth_buf_desc *descs, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
descs[0].desc, descs[1].desc,
descs[2].desc, descs[3].desc,
descs[4].desc, descs[5].desc,
- correlator, &correlator);
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
int last, i;
int force_bounce = 0;
dma_addr_t dma_addr;
+ unsigned long mss = 0;
/*
* veth handles a maximum of 6 segments including the header, so
@@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags = IBMVETH_BUF_VALID;
+ if (skb_is_gso(skb) && adapter->fw_large_send_support)
+ desc_flags |= IBMVETH_BUF_LRG_SND;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned char *buf = skb_transport_header(skb) +
skb->csum_offset;
@@ -1007,7 +1084,7 @@ retry_bounce:
descs[0].fields.flags_len = desc_flags | skb->len;
descs[0].fields.address = adapter->bounce_buffer_dma;
- if (ibmveth_send(adapter, descs)) {
+ if (ibmveth_send(adapter, descs, 0)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1041,16 +1118,23 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
- if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
- /* Put -1 in the IP checksum to tell phyp it
- * is a largesend packet and put the mss in the TCP checksum.
- */
- ip_hdr(skb)->check = 0xffff;
- tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
- adapter->tx_large_packets++;
+ if (skb_is_gso(skb)) {
+ if (adapter->fw_large_send_support) {
+ mss = (unsigned long)skb_shinfo(skb)->gso_size;
+ adapter->tx_large_packets++;
+ } else if (!skb_is_gso_v6(skb)) {
+ /* Put -1 in the IP checksum to tell phyp it
+ * is a largesend packet. Put the mss in
+ * the TCP checksum.
+ */
+ ip_hdr(skb)->check = 0xffff;
+ tcp_hdr(skb)->check =
+ cpu_to_be16(skb_shinfo(skb)->gso_size);
+ adapter->tx_large_packets++;
+ }
}
- if (ibmveth_send(adapter, descs)) {
+ if (ibmveth_send(adapter, descs, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
unsigned int *mcastFilterSize_p;
+ long ret;
+ unsigned long ret_attr;
dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
dev->unit_address);
@@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
netdev->features |= netdev->hw_features;
- /* TSO is disabled by default */
- netdev->hw_features |= NETIF_F_TSO;
+ ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ /* If running older firmware, TSO should not be enabled by default */
+ if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+ !old_large_send) {
+ netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ netdev->features |= netdev->hw_features;
+ } else {
+ netdev->hw_features |= NETIF_F_TSO;
+ }
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 41dedb1fb2ae..4eade67fe30c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -40,6 +40,8 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
+#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL
@@ -59,13 +61,20 @@
static inline long h_send_logical_lan(unsigned long unit_address,
unsigned long desc1, unsigned long desc2, unsigned long desc3,
unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out)
+ unsigned long corellator_in, unsigned long *corellator_out,
+ unsigned long mss, unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
- desc2, desc3, desc4, desc5, desc6, corellator_in);
+ if (large_send_support)
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+ desc1, desc2, desc3, desc4, desc5, desc6,
+ corellator_in, mss);
+ else
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+ desc1, desc2, desc3, desc4, desc5, desc6,
+ corellator_in);
*corellator_out = retbuf[0];
@@ -147,11 +156,13 @@ struct ibmveth_adapter {
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
+ int large_send;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
/* adapter specific stats */
u64 replenish_task_cycles;
u64 replenish_no_mem;
@@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields {
#endif
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
+#define IBMVETH_BUF_LRG_SND 0x04000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 26459853c6be..34c551e322eb 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -106,14 +106,14 @@
#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
-#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
-
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 89d788d8f263..546b5da168dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -48,7 +48,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -4280,18 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
+ u32 systimel_1, systimel_2, systimeh;
cycle_t systim, systim_next;
- /* SYSTIMH latching upon SYSTIML read does not work well. To fix that
- * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
- * to occur between reads, so if we read a vale close to overflow, we
- * wait for overflow to occur and read both registers when its safe.
+ /* SYSTIMH latching upon SYSTIML read does not work well.
+ * This means that if SYSTIML overflows after we read it but before
+ * we read SYSTIMH, the value of SYSTIMH has been incremented and we
+	 * will experience a huge non-linear increment in the systime value.
+	 * To fix that we test for overflow and, if one occurred, re-read
+	 * systime.
*/
- u32 systim_overflow_latch_fix = 0x3FFFFFFF;
-
- do {
- systim = (cycle_t)er32(SYSTIML);
- } while (systim > systim_overflow_latch_fix);
- systim |= (cycle_t)er32(SYSTIMH) << 32;
+ systimel_1 = er32(SYSTIML);
+ systimeh = er32(SYSTIMH);
+ systimel_2 = er32(SYSTIML);
+ /* Check for overflow. If there was no overflow, use the values */
+ if (systimel_1 < systimel_2) {
+ systim = (cycle_t)systimel_1;
+ systim |= (cycle_t)systimeh << 32;
+ } else {
+		/* There was an overflow; read SYSTIMH again and use
+		 * systimel_2.
+ */
+ systimeh = er32(SYSTIMH);
+ systim = (cycle_t)systimel_2;
+ systim |= (cycle_t)systimeh << 32;
+ }
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
u64 incvalue, time_delta, rem, temp;
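The replacement is the usual lock-free read of a split 64-bit counter, reading the low word twice because the hardware's SYSTIMH latch is unreliable: if the low word did not wrap between the two reads, the high word sampled in between is consistent. A generic sketch (read_lo()/read_hi() are illustrative, not driver functions):

	lo1 = read_lo();
	hi  = read_hi();
	lo2 = read_lo();
	if (lo1 < lo2)		/* no wrap: hi belongs with lo1 */
		systim = ((u64)hi << 32) | lo1;
	else			/* wrapped (or stalled): re-pair with lo2 */
		systim = ((u64)read_hi() << 32) | lo2;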
@@ -4588,6 +4599,7 @@ static int e1000_open(struct net_device *netdev)
return 0;
err_req_irq:
+ pm_qos_remove_request(&adapter->pm_qos_req);
e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -6316,6 +6328,33 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
return retval;
}
+ /* Ensure that the appropriate bits are set in LPI_CTRL
+ * for EEE in Sx
+ */
+ if ((hw->phy.type >= e1000_phy_i217) &&
+ adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
+ u16 lpi_ctrl = 0;
+
+ retval = hw->phy.ops.acquire(hw);
+ if (!retval) {
+ retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
+ &lpi_ctrl);
+ if (!retval) {
+ if (adapter->eee_advert &
+ hw->dev_spec.ich8lan.eee_lp_ability &
+ I82579_EEE_100_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ if (adapter->eee_advert &
+ hw->dev_spec.ich8lan.eee_lp_ability &
+ I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
+ lpi_ctrl);
+ }
+ }
+ hw->phy.ops.release(hw);
+ }
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -6465,7 +6504,7 @@ static int __e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
aspm_disable_flag |= PCIE_LINK_STATE_L1;
if (aspm_disable_flag)
- e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
pci_set_master(pdev);
@@ -6743,7 +6782,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
aspm_disable_flag |= PCIE_LINK_STATE_L1;
if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
+ e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
err = pci_enable_device_mem(pdev);
if (err) {
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index b24e5fee17f2..1d5e0b77062a 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,8 +38,8 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
-#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
-#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
@@ -125,7 +125,6 @@
(0x054E4 + ((_i - 16) * 8)))
#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ec76c3fa3a04..0f97883c1493 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -79,10 +79,13 @@
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+ (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
-#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf) \
+ (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
@@ -98,7 +101,7 @@
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +292,42 @@ struct i40e_pf {
struct work_struct service_task;
u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9)
+#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
#ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_PTP (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
+#define I40E_FLAG_PTP BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
#ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27)
#endif
-#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29)
+#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31)
+#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32)
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33)
+#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34)
+#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35)
+#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
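The BIT_ULL() conversion is what makes the new flags above bit 31 well-defined: `1 << n` is evaluated as a 32-bit int, so it overflows for n >= 31 even when the result is immediately cast to u64. A minimal illustration (assumed, not from the driver):

	u64 a = (u64)(1 << 31);	/* int overflow; typically sign-extends
				 * to 0xffffffff80000000 */
	u64 b = (u64)(1 << 32);	/* undefined behaviour outright */
	u64 c = BIT_ULL(32);	/* 1ULL << 32 == 0x100000000, as intended */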
/* tracks features that get auto disabled by errors */
@@ -443,8 +453,8 @@ struct i40e_vsi {
u32 current_netdev_flags;
unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
struct list_head mac_filter_list;
@@ -550,6 +560,7 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
+ bool arm_wb_state;
} ____cacheline_internodealigned_in_smp;
/* lan device */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 929e3d72a01e..95d23bfbcbf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MINOR 0x0004
struct i40e_aq_desc {
__le16 flags;
@@ -132,12 +132,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -262,7 +257,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -274,8 +272,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -509,7 +505,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -532,7 +529,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -826,8 +825,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1068,6 +1071,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -2064,6 +2068,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;
@@ -2177,6 +2187,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0bae22da014d..114dc6450183 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -54,6 +54,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_20G_KR2:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
}
/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
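Taken together, these two helpers replace raw numeric error reporting throughout the driver, as the i40e_ethtool.c and i40e_main.c hunks below show. A minimal usage sketch (the pf and hw variables are assumed to be in scope, as in any PF-level caller):

	i40e_status status;

	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
	if (status)
		dev_info(&pf->pdev->dev,
			 "get link info failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, status),
			 i40e_aq_str(hw, hw->aq.asq_last_status));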
+
+/**
* i40e_debug_aq
 * @hw: pointer to the hw struct
* @mask: debug mask
@@ -177,6 +392,169 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)lut));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
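As a usage sketch, a caller might program a round-robin VSI lookup table so that hash results spread across the first num_queues queues. The 64-byte buffer and num_queues below are illustrative, not defined by this patch:

	u8 lut[64];	/* illustrative VSI-sized LUT buffer */
	i40e_status status;
	int i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;	/* num_queues: assumed caller state */

	/* false selects the per-VSI table rather than the PF table */
	status = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));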
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)key));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
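A corresponding sketch for the key side: fill the 52-byte key structure (40-byte standard key plus 12-byte extended key, per i40e_aqc_get_set_rss_key_data above) and hand it to the set wrapper. netdev_rss_key_fill() is the stock kernel helper for a stable random RSS key; hw, pf and vsi_id are assumed caller state:

	struct i40e_aqc_get_set_rss_key_data key;
	i40e_status status;

	netdev_rss_key_fill(&key, sizeof(key));
	status = i40e_aq_set_rss_key(hw, vsi_id, &key);
	if (status)
		dev_info(&pf->pdev->dev,
			 "set rss key failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, status),
			 i40e_aq_str(hw, hw->aq.asq_last_status));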
+
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
@@ -563,6 +941,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
+ case I40E_MAC_X722:
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1187,9 +1566,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
blink = false;
if (blink)
- gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
- gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break;
@@ -2391,7 +2770,7 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
#define I40E_DEV_FUNC_CAP_CEM 0xF2
#define I40E_DEV_FUNC_CAP_IWARP 0x51
#define I40E_DEV_FUNC_CAP_LED 0x61
@@ -2416,6 +2795,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ u8 major_rev;
u32 i = 0;
u16 id;
@@ -2433,6 +2813,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
number = le32_to_cpu(cap->number);
logical_id = le32_to_cpu(cap->logical_id);
phys_id = le32_to_cpu(cap->phys_id);
+ major_rev = cap->major_rev;
switch (id) {
case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2507,9 +2888,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
case I40E_DEV_FUNC_CAP_MSIX_VF:
p->num_msix_vectors_vf = number;
break;
- case I40E_DEV_FUNC_CAP_MFP_MODE_1:
- if (number == 1)
- p->mfp_mode_1 = true;
+ case I40E_DEV_FUNC_CAP_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = true;
+ p->flex10_capable = true;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = true;
+ if (number & 2)
+ p->flex10_capable = true;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
break;
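In other words: for capability revision 1 the Flex10 word is a boolean (1 means both enabled and capable), while for revision 2 and later it is a bitmask. A standalone restatement of the rev >= 2 decoding, with illustrative bit names:

	/* Flex10 capability word, major_rev >= 2:
	 *   bit 0 - Flex10/FlexFabric currently enabled
	 *   bit 1 - device is Flex10 capable
	 */
	p->flex10_enable  = !!(number & BIT(0));
	p->flex10_capable = !!(number & BIT(1));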
case I40E_DEV_FUNC_CAP_CEM:
if (number == 1)
@@ -2557,7 +2950,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* Software override ensuring FCoE is disabled in npar or mfp
 * mode, because it is not supported in these modes.
*/
- if (p->npar_enable || p->mfp_mode_1)
+ if (p->npar_enable || p->flex10_enable)
p->fcoe = false;
/* count the enabled ports (aka the "not disabled" ports) */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index e137e3fac8ee..50fc894a4cde 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -58,9 +58,9 @@
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
#define I40E_IEEE_ETS_CBS_SHIFT 6
-#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
#define I40E_IEEE_ETS_WILLING_SHIFT 7
-#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
@@ -79,9 +79,9 @@
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
-#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
#define I40E_IEEE_PFC_WILLING_SHIFT 7
-#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
/* Defines for IEEE APP TLV */
#define I40E_IEEE_APP_SEL_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index bd5079d5c1b6..1c51f736a8d0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
/* Set up all the App TLVs if DCBx is negotiated */
for (i = 0; i < dcbxcfg->numapps; i++) {
prio = dcbxcfg->app[i].priority;
- tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+ tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
/* Add APP only if the TC is enabled for this VSI */
if (tc_map & vsi->tc_config.enabled_tc) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da0faf478af0..d7c15d17faa6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
pf->auto_disable_flags |= flag;
}
dev_info(&pf->pdev->dev, "requesting a PF reset\n");
- i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
}
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 56438bd579e6..f141e78d409e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
- (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
- ret_code = i40e_validate_nvm_checksum(hw, NULL);
- } else {
- ret_code = I40E_ERR_DIAG_TEST_FAILED;
- }
-
- return ret_code;
+ BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+ return i40e_validate_nvm_checksum(hw, NULL);
+ else
+ return I40E_ERR_DIAG_TEST_FAILED;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a68c65b17ea..83d41c2cb02d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -148,7 +148,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+ I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+ I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
/* LPI stats */
I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -679,15 +681,17 @@ static int i40e_set_settings(struct net_device *netdev,
/* make the aq call */
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
- netdev_info(netdev, "Set phy config failed with error %d.\n",
- status);
+ netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return -EAGAIN;
}
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
- netdev_info(netdev, "Updating link info failed with error %d\n",
- status);
+ netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -707,8 +711,9 @@ static int i40e_nway_reset(struct net_device *netdev)
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
- netdev_info(netdev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -820,18 +825,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -1009,7 +1017,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
/* register returns value in power of 2, 64Kbyte chunks. */
- val = (64 * 1024) * (1 << val);
+ val = (64 * 1024) * BIT(val);
return val;
}
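Since the register field is a power-of-2 exponent, a couple of worked values:

	/* field value 0 -> (64 * 1024) * BIT(0) =  64 KiB
	 * field value 2 -> (64 * 1024) * BIT(2) = 256 KiB
	 * field value 4 -> (64 * 1024) * BIT(4) =   1 MiB
	 */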
@@ -1462,20 +1470,11 @@ static int i40e_get_ts_info(struct net_device *dev,
else
info->phc_index = -1;
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
@@ -1591,7 +1590,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* indicate we're in test mode */
dev_close(netdev);
else
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
@@ -1613,7 +1612,7 @@ static void i40e_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running)
dev_open(netdev);
@@ -1646,7 +1645,7 @@ static void i40e_get_wol(struct net_device *netdev,
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+ if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1679,7 +1678,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if (((1 << hw->port) & wol_nvm_bits))
+ if (BIT(hw->port) & wol_nvm_bits)
return -EOPNOTSUPP;
/* only magic packet is supported */
@@ -2025,10 +2024,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
default:
return -EINVAL;
@@ -2037,10 +2036,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
default:
return -EINVAL;
@@ -2049,12 +2048,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -2063,12 +2062,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -2081,7 +2080,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2090,15 +2089,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index c8b621e0e7cd..5ea75dd537d6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
/* enable FCoE hash filter */
val = rd32(hw, I40E_PFQF_HENA(1));
- val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
- val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+ val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+ val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
wr32(hw, I40E_PFQF_HENA(1), val);
@@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
/* Reserve 4K DDP contexts and 20K filter size for FCoE */
- pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
- I40E_DMA_CNTX_BASE_SIZE;
+ pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+ I40E_DMA_CNTX_BASE_SIZE;
pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
- (1 << I40E_HASH_FILTER_SIZE_16K) *
+ BIT(I40E_HASH_FILTER_SIZE_16K) *
I40E_HASH_FILTER_BASE_SIZE;
/* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app.protocolid == ETH_P_FCOE) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
- enabled_tc |= (1 << tc);
+ enabled_tc |= BIT(tc);
break;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 0d49e2d15d40..a93174ddeaba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -59,9 +59,9 @@
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
- (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+ BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
- (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+ BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 9b987ccc9e82..5ebe12d56ebf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -116,6 +116,7 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one
*
* This function:
* 1. Initializes the pd entry
@@ -129,12 +130,14 @@ exit:
**/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index)
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
@@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
- /* allocate a 4K backing page */
- ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
- I40E_HMC_PAGED_BP_SIZE,
- I40E_HMC_PD_BP_BUF_ALIGNMENT);
- if (ret_code)
- goto exit;
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = false;
+ }
- pd_entry->bp.addr = mem;
+ pd_entry->bp.addr = *page;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
- page_desc = mem.pa | 0x1;
+ page_desc = page->pa | 0x1;
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
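A hypothetical call-site sketch of the new parameter: a caller that manages its own backing memory passes a preallocated, 4 KiB-aligned page, while existing callers keep the allocate-on-demand behavior by passing NULL (as the i40e_lan_hmc.c hunk below does):

	struct i40e_dma_mem pg;	/* assumed already DMA-allocated and aligned */

	/* back the PD entry with the caller's page; no allocation, and
	 * i40e_remove_pd_bp() skips the free because rsrc_pg is set
	 */
	ret = i40e_add_pd_table_entry(hw, hmc_info, pd_index, &pg);

	/* legacy behavior: the function allocates the 4K page itself */
	ret = i40e_add_pd_table_entry(hw, hmc_info, pd_index, NULL);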
@@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
if (ret_code)
goto exit;
if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
- i40e_status ret_code = 0;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
- if (ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
}
/**
@@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- /* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
- if (ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 732a02660330..d90669211392 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 0079ad7bcd0e..fa371a2a40c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
obj->cnt = txq_num;
obj->base = 0;
size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* update the pd table entry */
ret_code = i40e_add_pd_table_entry(hw,
info->hmc_info,
- i);
+ i, NULL);
if (ret_code) {
pd_error = true;
break;
@@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u8)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
src_byte = *from;
src_byte &= mask;
@@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u16)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
- mask = ((u32)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
else
mask = ~(u32)0;
@@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
- mask = ((u64)1 << ce_info->width) - 1;
+ mask = BIT_ULL(ce_info->width) - 1;
else
mask = ~(u64)0;
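A couple of worked values for the mask construction shared by all four write helpers:

	/* width  5 -> BIT_ULL(5)  - 1 = 0x1F
	 * width 12 -> BIT_ULL(12) - 1 = 0xFFF
	 * width 64 -> the shift would be undefined, hence the ~(u64)0 fallback
	 */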
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 48a52b35b614..3bb832a2ec51 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -76,6 +76,9 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
/* required last entry */
{0, }
};
@@ -520,7 +523,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
if (likely(new_data >= *offset))
*stat = new_data - *offset;
else
- *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat = (new_data + BIT_ULL(48)) - *offset;
*stat &= 0xFFFFFFFFFFFFULL;
}
@@ -543,7 +546,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
if (likely(new_data >= *offset))
*stat = (u32)(new_data - *offset);
else
- *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+ *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
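The BIT_ULL additions above handle counter rollover; a worked example for the 48-bit case:

	/* counter wrapped: new_data = 0x10, *offset = 0xFFFFFFFFFFF0
	 * *stat = (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20
	 * i.e. 0x20 events occurred across the wrap
	 */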
/**
@@ -1123,6 +1126,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->rx_lpi_count, &nsd->rx_lpi_count);
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+ !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ nsd->fd_sb_status = true;
+ else
+ nsd->fd_sb_status = false;
+
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+ !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ nsd->fd_atr_status = true;
+ else
+ nsd->fd_atr_status = false;
+
pf->stat_offsets_loaded = true;
}
@@ -1264,7 +1279,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
- i40e_status aq_ret;
+ i40e_status ret;
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
@@ -1275,8 +1290,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- if (aq_ret)
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ if (ret)
return -ENOENT;
return 0;
@@ -1514,7 +1529,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
/* Find numtc from enabled TC bitmap */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i)) /* TC is enabled */
+ if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
numtc++;
}
if (!numtc) {
@@ -1535,12 +1550,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
*/
qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
num_tc_qps = qcount / numtc;
- num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
+ num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
- if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+ if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+ /* TC is enabled */
int pow, num_qps;
switch (vsi->type) {
@@ -1566,7 +1582,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
/* find the next higher power-of-2 of num queue pairs */
num_qps = qcount;
pow = 0;
- while (num_qps && ((1 << pow) < qcount)) {
+ while (num_qps && (BIT_ULL(pow) < qcount)) {
pow++;
num_qps >>= 1;
}
@@ -1716,10 +1732,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
bool add_happened = false;
int filter_list_len = 0;
u32 changed_flags = 0;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
+ int aq_err = 0;
u16 cmd_flags;
/* empty array typed pointers, kcalloc later */
@@ -1771,31 +1788,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid, del_list, num_del,
- NULL);
+ ret = i40e_aq_remove_macvlan(&pf->hw,
+ vsi->seid, del_list, num_del,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret &&
- pf->hw.aq.asq_last_status !=
- I40E_AQ_RC_ENOENT)
+ if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
- aq_ret,
- pf->hw.aq.asq_last_status);
+ "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
del_list, num_del, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- if (aq_ret &&
- pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+ if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "ignoring delete macvlan error, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
kfree(del_list);
@@ -1833,29 +1850,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add,
- NULL);
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
- if (aq_ret)
+ if (ret)
break;
memset(add_list, 0, sizeof(*add_list));
}
}
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add, NULL);
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
}
kfree(add_list);
add_list = NULL;
- if (add_happened && aq_ret &&
- pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+ if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
dev_info(&pf->pdev->dev,
- "add filter failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "add filter failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
!test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
@@ -1871,34 +1890,40 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_multipromisc,
- NULL);
- if (aq_ret)
+ ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (ret)
dev_info(&pf->pdev->dev,
- "set multi promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set multi promisc failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state));
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret)
+ ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
dev_info(&pf->pdev->dev,
- "set uni promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret)
+ "set uni promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1994,8 +2019,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vlan stripping failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
}
}
@@ -2023,8 +2050,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vlan stripping failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
}
}
@@ -2294,7 +2323,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status aq_ret;
+ i40e_status ret;
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -2304,11 +2333,13 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ctxt.seid = vsi->seid;
ctxt.info = vsi->info;
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "add pvid failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -2696,9 +2727,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
#endif /* I40E_FCOE */
/* round up for the chip's needs */
vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
- (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
- (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2728,7 +2759,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
}
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
- if (!(vsi->tc_config.enabled_tc & (1 << n)))
+ if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
continue;
qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -2877,6 +2908,9 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+
if (pf->flags & I40E_FLAG_PTP)
val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
@@ -3167,6 +3201,13 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0 & I40E_PFINT_ICR0_SWINT_MASK))
pf->sw_int_count++;
+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+ icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+ dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ }
+
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -4073,7 +4114,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
if (app.selector == I40E_APP_SEL_TCPIP &&
app.protocolid == I40E_APP_PROTOID_ISCSI) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
- enabled_tc |= (1 << tc);
+ enabled_tc |= BIT_ULL(tc);
break;
}
}
@@ -4122,7 +4163,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
u8 i;
for (i = 0; i < num_tc; i++)
- enabled_tc |= 1 << i;
+ enabled_tc |= BIT(i);
return enabled_tc;
}
@@ -4157,7 +4198,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
num_tc++;
}
return num_tc;
@@ -4179,11 +4220,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
/* Find the first enabled TC */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
break;
}
- return 1 << i;
+ return BIT(i);
}
/**
@@ -4221,26 +4262,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
+ i40e_status ret;
u32 tc_bw_max;
int i;
/* Get the VSI level BW configuration */
- aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
- if (aq_ret) {
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi bw config, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
/* Get the VSI level BW configuration per TC */
- aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
- if (aq_ret) {
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+ NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -4279,16 +4322,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- i40e_status aq_ret;
+ i40e_status ret;
int i;
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
- aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
- NULL);
- if (aq_ret) {
+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+ NULL);
+ if (ret) {
dev_info(&vsi->back->pdev->dev,
"AQ command Config VSI BW allocation per TC failed = %d\n",
vsi->back->hw.aq.asq_last_status);
@@ -4337,7 +4380,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
* will set the numtc for netdev as 2 that will be
* referenced by the netdev layer as TC 0 and 1.
*/
- if (vsi->tc_config.enabled_tc & (1 << i))
+ if (vsi->tc_config.enabled_tc & BIT_ULL(i))
netdev_set_tc_queue(netdev,
vsi->tc_config.tc_info[i].netdev_tc,
vsi->tc_config.tc_info[i].qcount,
@@ -4399,7 +4442,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
bw_share[i] = 1;
}
@@ -4423,8 +4466,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vsi failed, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "Update vsi tc config failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -4435,8 +4480,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "Failed updating vsi bw info, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "Failed updating vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
@@ -4469,7 +4516,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
bw_data.tc_bw_share_credits[i] = 1;
}
@@ -4477,8 +4524,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "veb bw config failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "VEB bw config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -4486,8 +4534,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed getting veb bw config, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "Failed getting veb bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
out:
@@ -4574,8 +4623,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "AQ command Resume Port Tx failed = %d\n",
- pf->hw.aq.asq_last_status);
+ "Resume Port Tx failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
i40e_service_event_schedule(pf);
@@ -4627,8 +4677,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
}
} else {
dev_info(&pf->pdev->dev,
- "AQ Querying DCB configuration failed: aq_err %d\n",
- pf->hw.aq.asq_last_status);
+ "Query for DCB configuration failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
out:
@@ -4859,7 +4910,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
/* Generate TC map for number of tc requested */
for (i = 0; i < tc; i++)
- enabled_tc |= (1 << i);
+ enabled_tc |= BIT_ULL(i);
/* Requesting same TC configuration as already enabled */
if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -4998,7 +5049,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
return err;
}
@@ -5066,7 +5117,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
i40e_vc_notify_reset(pf);
/* do the biggest reset indicated */
- if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
/* Request a Global Reset
*
@@ -5081,7 +5132,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
- } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
/* Request a Core Reset
*
@@ -5093,7 +5144,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
- } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
*
@@ -5106,7 +5157,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf);
- } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
/* Find the VSI(s) that requested a re-init */
@@ -5123,7 +5174,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
/* no further action needed, so return now */
return;
- } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
/* Find the VSI(s) that needs to be brought down */
@@ -5253,7 +5304,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+ dev_info(&pf->pdev->dev,
+ "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto exit;
}
@@ -5761,23 +5815,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
}
if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
}
@@ -5983,27 +6037,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
- int aq_ret;
+ int ret;
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s couldn't get PF vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vsi switch failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -6017,27 +6073,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
- int aq_ret;
+ int ret;
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s couldn't get PF vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vsi switch failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -6097,7 +6155,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
ret = i40e_add_vsi(ctl_vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "rebuild of owner VSI failed: %d\n", ret);
+ "rebuild of veb_idx %d owner VSI failed: %d\n",
+ veb->idx, ret);
goto end_reconstitute;
}
i40e_vsi_reset_stats(ctl_vsi);
@@ -6176,8 +6235,10 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
dev_info(&pf->pdev->dev,
- "capability discovery failed: aq=%d\n",
- pf->hw.aq.asq_last_status);
+ "capability discovery failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
@@ -6363,7 +6424,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
@@ -6373,11 +6436,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
- if (ret) {
- dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
- ret);
+ if (ret)
goto end_core_reset;
- }
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
@@ -6418,12 +6478,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* make sure our flow control settings are restored */
ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
if (ret)
- dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+ dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -6484,8 +6548,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -6647,8 +6713,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_vxlan_bitmap & (1 << i)) {
- pf->pending_vxlan_bitmap &= ~(1 << i);
+ if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+ pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
port = pf->vxlan_ports[i];
if (port)
ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6659,10 +6725,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
- "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+ "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
port ? "add" : "delete",
- ntohs(port), i, ret,
- pf->hw.aq.asq_last_status);
+ ntohs(port), i,
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
pf->vxlan_ports[i] = 0;
}
}
@@ -7013,6 +7081,10 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
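+		/* Carry optional per-ring features over from the PF capability flags */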
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+ tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
vsi->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
@@ -7411,62 +7483,139 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
}
/**
- * i40e_config_rss - Prepare for RSS if used
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+ struct i40e_aqc_get_set_rss_key_data rss_key;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ bool pf_lut = false;
+ u8 *rss_lut;
+ int ret, i;
+
+ memset(&rss_key, 0, sizeof(rss_key));
+ memcpy(&rss_key, seed, sizeof(rss_key));
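+	/* The seed fills the entire AQ key structure (standard plus extended hash key) */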
+
+ rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
+ if (!rss_lut)
+ return -ENOMEM;
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = 0; i < vsi->rss_table_size; i++)
+ rss_lut[i] = i % vsi->rss_size;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		kfree(rss_lut);
+		return ret;
+ }
+
+ if (vsi->type == I40E_VSI_MAIN)
+ pf_lut = true;
+
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
+ vsi->rss_table_size);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+	kfree(rss_lut);
+	return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_pf *pf = vsi->back;
+
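+	/* Generate a random hash seed and cap the VSI's RSS width at its queue count */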
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(vsi, seed);
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss_reg - Prepare for RSS if used
* @pf: board private structure
+ * @seed: RSS hash seed
**/
-static int i40e_config_rss(struct i40e_pf *pf)
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
{
- u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
+ u32 *seed_dw = (u32 *)seed;
+ u32 current_queue = 0;
u32 lut = 0;
int i, j;
- u64 hena;
- u32 reg_val;
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ /* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
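+	/* Each 32-bit HLUT register packs four one-byte LUT entries; spread
+	 * the queues round-robin, wrapping at the VSI's RSS width
+	 */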
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+ lut = 0;
+ for (j = 0; j < 4; j++) {
+ if (current_queue == vsi->rss_size)
+ current_queue = 0;
+ lut |= ((current_queue) << (8 * j));
+ current_queue++;
+ }
+ wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+ }
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ u64 hena;
+
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= I40E_DEFAULT_RSS_HENA;
+ hena |= i40e_pf_get_default_rss_hena(pf);
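+	/* i40e_pf_get_default_rss_hena() picks a MAC-dependent default set;
+	 * X722 parts enable additional packet types
+	 */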
+
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
- /* Check capability and Set table size and register per hw expectation*/
+ /* Determine the RSS table size based on the hardware capabilities */
reg_val = rd32(hw, I40E_PFQF_CTL_0);
- if (pf->rss_table_size == 512)
- reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- else
- reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+ reg_val = (pf->rss_table_size == 512) ?
+ (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+ (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
wr32(hw, I40E_PFQF_CTL_0, reg_val);
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
- /* The assumption is that lan qp count will be the highest
- * qp count for any PF VSI that needs RSS.
- * If multiple VSIs need RSS support, all the qp counts
- * for those VSIs should be a power of 2 for RSS to work.
- * If LAN VSI is the only consumer for RSS then this requirement
- * is not necessary.
- */
- if (j == vsi->rss_size)
- j = 0;
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j &
- ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
- i40e_flush(hw);
-
- return 0;
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
+ else
+ return i40e_config_rss_reg(pf, seed);
}
/**
@@ -7533,7 +7682,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
i40e_status status;
/* Set the valid bit for this PF */
- bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
@@ -7567,8 +7716,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %d: aq_err %d\n",
- ret, last_aq_status);
+ "Cannot acquire NVM for read access, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -7583,8 +7733,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
- ret, last_aq_status);
+ dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -7596,8 +7747,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %d: aq_err %d\n",
- ret, last_aq_status);
+ "Cannot acquire NVM for write access, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
/* Write it back out unchanged to initiate update NVM,
@@ -7615,8 +7767,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %d aq_err %d\n",
- ret, last_aq_status);
+ "BW settings NOT SAVED, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:
return ret;
@@ -7662,7 +7815,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
*/
- pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
pf->rss_size = 1;
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7673,7 +7826,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
/* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
if (i40e_get_npar_bw_setting(pf))
@@ -7703,9 +7856,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
if (pf->hw.func_caps.vmdq) {
- pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
- pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
}
#ifdef I40E_FCOE
@@ -7723,6 +7875,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
+ if (pf->hw.mac.type == I40E_MAC_X722) {
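+		/* X722 supports AQ-programmed RSS, 128-queue RSS tables, ATR
+		 * eviction, outer UDP checksum offload and write-back on ITR
+		 */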
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
+ I40E_FLAG_128_QP_RSS_CAPABLE |
+ I40E_FLAG_HW_ATR_EVICT_CAPABLE |
+ I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
+ I40E_FLAG_WB_ON_ITR_CAPABLE |
+ I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+ }
pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
@@ -7812,7 +7972,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
return 0;
}
@@ -7875,7 +8035,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->vxlan_ports[next_idx] = port;
- pf->pending_vxlan_bitmap |= (1 << next_idx);
+ pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
@@ -7906,7 +8066,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
* and make it pending
*/
pf->vxlan_ports[idx] = 0;
- pf->pending_vxlan_bitmap |= (1 << idx);
+ pf->pending_vxlan_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
@@ -7981,7 +8141,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-#ifdef HAVE_BRIDGE_ATTRIBS
/**
* i40e_ndo_bridge_setlink - Set the hardware bridge mode
* @dev: the netdev being configured
@@ -7995,7 +8154,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* bridge mode enabled.
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh,
+ u16 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -8066,14 +8226,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
* Return the mode in which the hardware bridge is operating in
* i.e VEB or VEPA.
**/
-#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask, int nlflags)
-#else
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, int nlflags)
-#endif /* HAVE_BRIDGE_FILTER */
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -8097,7 +8252,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
nlflags, 0, 0, filter_mask, NULL);
}
-#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
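+	/* Offloads cannot be used when the tunnel header (outer transport
+	 * header to inner MAC header) exceeds 80 bytes
+	 */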
+ if (skb->encapsulation &&
+ (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ I40E_MAX_TUNNEL_HDR_LEN))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
+}
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
@@ -8133,10 +8306,9 @@ static const struct net_device_ops i40e_netdev_ops = {
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
-#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
};
/**
@@ -8304,8 +8476,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
@@ -8327,8 +8501,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "update vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -8345,9 +8521,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_config_tc(vsi, enabled_tc);
if (ret) {
dev_info(&pf->pdev->dev,
- "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
- enabled_tc, ret,
- pf->hw.aq.asq_last_status);
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+ enabled_tc,
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
}
}
@@ -8438,8 +8616,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add vsi failed, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "add vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -8484,8 +8664,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get vsi bw info, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't get vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}
@@ -8658,7 +8839,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
- "failed to get tracking for %d queues for VSI %d err=%d\n",
+ "failed to get tracking for %d queues for VSI %d err %d\n",
vsi->alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
@@ -8857,6 +9038,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
break;
}
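+	/* VMDq VSIs get their RSS configured here when the hardware supports AQ-based RSS */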
+ if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+ (vsi->type == I40E_VSI_VMDQ2)) {
+ ret = i40e_vsi_config_rss(vsi);
+ }
return vsi;
err_rings:
@@ -8896,8 +9081,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw config failed, aq_err=%d\n",
- hw->aq.asq_last_status);
+ "query veb bw config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -8905,8 +9091,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw ets config failed, aq_err=%d\n",
- hw->aq.asq_last_status);
+ "query veb bw ets config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -9090,36 +9277,40 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
+ struct i40e_pf *pf = veb->pf;
bool is_default = false;
bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
- ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
veb->enabled_tc, is_default,
is_cloud, &veb->seid, NULL);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't add VEB, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "couldn't add VEB, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
/* get statistics counter */
- ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+ ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't get VEB statistics idx, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB statistics idx, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't get VEB bw info, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
- i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB bw info, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
@@ -9325,8 +9516,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
&next_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "get switch config failed %d aq_err=%x\n",
- ret, pf->hw.aq.asq_last_status);
+ "get switch config failed err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
@@ -9367,8 +9560,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't fetch switch config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't fetch switch config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
i40e_pf_reset_stats(pf);
@@ -9743,7 +9937,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_shared_code(hw);
if (err) {
- dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
goto err_pf_reset;
}
@@ -9910,15 +10105,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
if (err)
- dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -10006,8 +10205,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
- err);
+ dev_info(&pf->pdev->dev,
+ "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* print a string summarizing features */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 554e49d02683..9b83abc0e774 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
/* Switching to words (sr_size contains power of 2KB) */
- nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
/* Write the address and start reading */
- sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
- (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
/* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -212,6 +212,74 @@ read_nvm_exit:
}
/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit word buffer from the Shadow RAM using the admin queue command.
+ **/
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ i40e_status ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write fail error: tried to write %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
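+	/* AQ data is little-endian; convert the word in place to host order */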
+ *data = le16_to_cpu(*(__le16 *)data);
+
+ return ret_code;
+}
+
+/**
* i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -222,6 +290,8 @@ read_nvm_exit:
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
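+	/* X722 accesses the Shadow RAM through the AdminQ rather than the SRCTL register */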
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_word_aq(hw, offset, data);
return i40e_read_nvm_word_srctl(hw, offset, data);
}
@@ -257,6 +327,63 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code;
+ u16 read_size = *words;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ do {
+ /* Calculate the number of words to read in this step.
+ * The AQ does not allow reading more than one 4KB sector at a
+ * time or crossing sector boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
+
+/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -270,6 +397,8 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 7b34f1e660ea..dcb72a8ee8e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,6 +58,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b7725dec3..8c40d6ea15fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -43,9 +43,8 @@
#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
- I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
/**
@@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
- if (!(prttsyn_stat & (1 << index)))
+ if (!(prttsyn_stat & BIT(index)))
return;
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 522d6df51330..acae6c744bc2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -3366,4 +3366,1933 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
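+/* Note, for illustration only: the parameterized defines above are
+ * register arrays with a 4-byte stride; for example,
+ * I40E_GLHMC_VFSDPART(vf) resolves to 0x000C8800 + vf * 4 for
+ * vf in 0...31 (its _MAX_INDEX).
+ */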
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
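+/* Illustrative sketch, not part of the register map itself: each field
+ * is described by a _SHIFT/_MASK pair, with the mask pre-shifted by
+ * I40E_MASK(mask, shift), so a single-bit field can be tested directly
+ * against the register value (assuming the driver's usual rd32()
+ * accessor), e.g.:
+ *
+ *	if (rd32(hw, I40E_GLNVM_ULD) & I40E_GLNVM_ULD_CORER_DONE_MASK)
+ *		;	(the CORER stage has completed)
+ */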
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
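+/* Illustrative sketch: multi-bit fields such as HMC_PROFILE are read by
+ * masking first, then shifting down (again assuming the usual rd32()
+ * accessor):
+ *
+ *	u32 val = rd32(hw, I40E_PFPE_CCQPSTATUS);
+ *	u32 profile = (val & I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK) >>
+ *		      I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT;
+ */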
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
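The _SHIFT/_MASK pairs above are intended to be used together when decoding a register read. A minimal sketch, assuming the driver's rd32() MMIO read accessor and a hypothetical vf_id index:

	/* Read one VF's CCQP status and decode two of its fields. */
	u32 val = rd32(hw, I40E_VFPE_CCQPSTATUS(vf_id));
	bool ccqp_done = val & I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK;
	u32 hmc_profile = (val & I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK) >>
			  I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT;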
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
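Each *SHI/*SLO pair above describes one 48-bit statistics counter: the low 32 bits in the LO register and the high 16 bits in the HI register (per the 0xFFFF masks on the HI fields). A minimal sketch of combining one pair, again assuming rd32(); the helper name is illustrative only:

	/* Combine a PES HI/LO register pair into one 48-bit counter value. */
	static u64 read_pes_stat48(struct i40e_hw *hw, u32 hireg, u32 loreg)
	{
		u64 lo = rd32(hw, loreg);
		u64 hi = rd32(hw, hireg) & 0xFFFF;	/* HI fields are 16 bits wide */

		return (hi << 32) | lo;
	}

e.g. read_pes_stat48(hw, I40E_GLPES_PFIP4RXOCTSHI(0), I40E_GLPES_PFIP4RXOCTSLO(0)) would return the IPv4 receive octet count for index 0.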
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
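/* The two-parameter VSIQF tables above are laid out with _i as the major
 * index: stepping _i moves 2048 bytes, stepping _VSI moves 4 bytes, so for
 * example I40E_VSIQF_HLUT(2, 5) = 0x00220000 + 2 * 2048 + 5 * 4 = 0x00221014.
 * (Illustrative note, not part of the generated header.) */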
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
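
The paired GLPES statistics registers above export 48-bit rollover counters: the LO register holds the low 32 bits and the HI register the upper 16 (note the 0xFFFF field masks on the *PKTSHI/*SEGSHI defines). A minimal sketch, not part of this patch, of combining one pair with the driver's rd32() register-read helper:

static u64 i40e_read_pe_stat48(struct i40e_hw *hw, u32 loreg, u32 hireg)
{
	u64 lo = rd32(hw, loreg);		/* e.g. I40E_GLPES_VFUDPRXPKTSLO(vf) */
	u64 hi = rd32(hw, hireg) & 0xFFFF;	/* e.g. I40E_GLPES_VFUDPRXPKTSHI(vf) */

	return (hi << 32) | lo;			/* 48-bit rollover counter */
}
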
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9a4f2bc70cd2..738aca68f665 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
- if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
dev_info(&pdev->dev,
"FD filter programming failed due to incorrect filter parameters\n");
}
- } else if (error ==
- (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
rx_desc->wb.qword0.hi_dword.fd_id);
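
This and the following hunks mechanically replace open-coded (0x1 << n) shifts with the kernel's bit helpers. For reference, the macros (from <linux/bitops.h> at the time of this series) expand to:

#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

The values are identical to the open-coded forms; the helpers simply make single-bit intent explicit and keep the shifts unsigned, which matters once shift counts reach bit 31 and beyond.
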
@@ -854,15 +853,40 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
**/
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
- /* allow 00 to be written to the index */
-
- wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
- val);
+ u16 flags = q_vector->tx.ring[0].flags;
+
+ if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ u32 val;
+
+ if (q_vector->arm_wb_state)
+ return;
+
+ val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1),
+ val);
+ q_vector->arm_wb_state = true;
+ } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ } else {
+ u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+ }
}
/**
@@ -892,7 +916,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
* 20-1249MB/s bulk (8000 ints/s)
*/
bytes_per_int = rc->total_bytes / rc->itr;
- switch (rc->itr) {
+ switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +929,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
case I40E_BULK_LATENCY:
if (bytes_per_int <= 20)
- rc->latency_range = I40E_LOW_LATENCY;
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ default:
+ if (bytes_per_int <= 20)
+ new_latency_range = I40E_LOW_LATENCY;
break;
}
+ rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
@@ -923,42 +952,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
}
- if (new_itr != rc->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * rc->itr) /
- ((9 * new_itr) + rc->itr);
- rc->itr = new_itr & I40E_MAX_ITR;
- }
+ if (new_itr != rc->itr)
+ rc->itr = new_itr;
rc->total_bytes = 0;
rc->total_packets = 0;
}
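
For context on the dropped smoothing: the old update moved rc->itr only part way toward the target each time, e.g. with rc->itr = 200 and new_itr = 40, (10 * 40 * 200) / ((9 * 40) + 200) = 80000 / 560 = 142 (integer division), so several invocations were needed to converge. After this hunk the driver latches the bucket value chosen by the switch above directly.
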
/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
- u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
- struct i40e_hw *hw = &q_vector->vsi->back->hw;
- u32 reg_addr;
- u16 old_itr;
-
- reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr)
- wr32(hw, reg_addr, q_vector->rx.itr);
-
- reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr)
- wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
* i40e_clean_programming_status - clean the programming status descriptor
* @rx_ring: the rx ring that has this descriptor
* @rx_desc: the rx descriptor written back by HW
@@ -1386,7 +1387,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return;
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
/* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1402,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true;
if (ipv4 &&
- (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
- rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed; in this case the stack can compute
* the csum.
*/
- if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1428,7 +1429,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* be set if the *inner* header is UDP
*/
- if (ipv4_tunnel) {
+ if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+ (ipv4_tunnel)) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1543,7 +1545,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1584,8 +1586,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1639,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1649,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
continue;
}
@@ -1669,7 +1671,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1730,7 +1732,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1753,7 +1755,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1773,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++;
continue;
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
@@ -1802,7 +1804,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1827,6 +1829,68 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
}
/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ u16 old_itr;
+ int vector;
+ u32 val;
+
+ vector = (q_vector->v_idx + vsi->base_vector);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr) {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_RX_ITR <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (q_vector->rx.itr <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+ } else {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+ } else {
+ i40e_irq_dynamic_enable(vsi,
+ q_vector->v_idx + vsi->base_vector);
+ }
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr) {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_TX_ITR <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (q_vector->tx.itr <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+ } else {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ } else {
+ i40e_irq_dynamic_enable(vsi,
+ q_vector->v_idx + vsi->base_vector);
+ }
+}
+
+/**
* i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1880,35 +1944,29 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ q_vector->arm_wb_state = false;
+
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
- ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- i40e_update_dynamic_itr(q_vector);
-
- if (!test_bit(__I40E_DOWN, &vsi->state)) {
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
- i40e_irq_dynamic_enable(vsi,
- q_vector->v_idx + vsi->base_vector);
- } else {
- struct i40e_hw *hw = &vsi->back->hw;
- /* We re-enable the queue 0 cause, but
- * don't worry about dynamic_enable
- * because we left it on for the other
- * possible interrupts during napi
- */
- u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
- qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_RQCTL(0), qval);
-
- qval = rd32(hw, I40E_QINT_TQCTL(0));
- qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), qval);
-
- i40e_irq_dynamic_enable_icr0(vsi->back);
- }
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_update_enable_itr(vsi, q_vector);
+ } else { /* Legacy mode */
+ struct i40e_hw *hw = &vsi->back->hw;
+ /* We re-enable the queue 0 cause, but
+ * don't worry about dynamic_enable
+ * because we left it on for the other
+ * possible interrupts during napi
+ */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+ qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_irq_dynamic_enable_icr0(vsi->back);
}
-
return 0;
}
@@ -1982,6 +2040,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+ /* HW ATR eviction will take care of removing filters on FIN
+ * and RST packets.
+ */
+ if (th->fin || th->rst)
+ return;
+ }
tx_ring->atr_count++;
@@ -2037,6 +2102,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
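
Placement check for the new flag: I40E_TXD_FLTR_QW1_ATR_SHIFT = 0xE + I40E_TXD_FLTR_QW1_CMD_SHIFT (4) = bit 18, which sits inside the 16-bit CMD field (bits 4-19) of the filter-programming descriptor, so OR-ing the mask into dtype_cmd here tags the programmed filter as hardware-evictable on FIN/RST.
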
@@ -2244,11 +2312,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ struct udphdr *oudph;
+ struct iphdr *oiph;
u32 l4_tunnel = 0;
if (skb->encapsulation) {
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
+ oudph = udp_hdr(skb);
+ oiph = ip_hdr(skb);
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
@@ -2285,6 +2357,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
*tx_flags &= ~I40E_TX_FLAGS_IPV4;
*tx_flags |= I40E_TX_FLAGS_IPV6;
}
+ if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+ (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
+ (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+ oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+ oiph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, 0);
+ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -2616,6 +2697,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index)))
writel(i, tx_ring->tail);
+ else
+ prefetchw(tx_desc + 1);
return;
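
In the outer-UDP block added above, ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, 0) leaves the un-complemented pseudo-header sum in oudph->check; with I40E_TXD_CTX_QW0_L4T_CS_MASK set, the MAC extends that partial sum over the UDP payload and writes the final complement. A self-contained sketch of the fold (hypothetical helper, shown only to make the seeding explicit):

static u16 udp_pseudo_seed(u32 saddr, u32 daddr, u16 len)
{
	/* sum the pseudo-header fields; values in host order for clarity */
	u32 sum = (saddr >> 16) + (saddr & 0xffff) +
		  (daddr >> 16) + (daddr & 0xffff) +
		  len + IPPROTO_UDP;

	while (sum >> 16)			/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (u16)sum;	/* the partial sum hardware will extend */
}
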
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 0dc48dc9ca61..f1385a1989fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -66,17 +66,29 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+ BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
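
i40e_pf_get_default_rss_hena() selects the expanded set only when the pf advertises the extra X722 packet-classifier types. One caution: BIT() expands to 1UL, so pctypes 32 and 42 in the EXPANDED mask rely on a 64-bit long; BIT_ULL() would be the strictly correct spelling. A sketch of applying the 64-bit hash-enable word across the two 32-bit HENA registers, as done elsewhere in the driver:

	u64 hena = i40e_pf_get_default_rss_hena(pf);

	wr32(hw, I40E_PFQF_HENA(0), lower_32_bits(hena));
	wr32(hw, I40E_PFQF_HENA(1), upper_32_bits(hena));
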
@@ -129,17 +141,17 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN BIT(2)
+#define I40E_TX_FLAGS_TSO BIT(3)
+#define I40E_TX_FLAGS_IPV4 BIT(4)
+#define I40E_TX_FLAGS_IPV6 BIT(5)
+#define I40E_TX_FLAGS_FCCRC BIT(6)
+#define I40E_TX_FLAGS_FSO BIT(7)
+#define I40E_TX_FLAGS_TSYN BIT(8)
+#define I40E_TX_FLAGS_FD_SB BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -253,6 +265,10 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
+
/* stats structs */
struct i40e_queue_stats stats;
struct u64_stats_sync syncp;
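
The per-ring flags word gives the hot paths a cheap capability test. Note that the napi_poll hunk earlier tests vsi->back->flags against I40E_TXR_FLAGS_WB_ON_ITR, a ring-flag constant; the ring flags word defined here appears to be the intended operand. A hedged sketch of seeding the flags at ring setup (the pf-level capability flag names are assumptions, set elsewhere in this series for X722 parts):

	if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
		tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
	if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
		tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
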
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 9a5a75b1e2bc..61b6b114b4bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -47,6 +47,11 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
@@ -120,6 +125,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -213,7 +220,17 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
- bool mfp_mode_1;
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -487,11 +504,13 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
+ char err_str[16];
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -588,19 +607,23 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ /* Note: For non-tunnel packets INT_UDP_0 is the right status for
+ * UDP header
+ */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
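
The rebuilt status mask uses the (BIT(n) - 1) idiom for a contiguous n-bit field: with INT_UDP_0 at bit 18, I40E_RX_DESC_STATUS_LAST evaluates to 19 and BIT(19) - 1 = 0x7FFFF, covering status bits 0-18 after the (here zero) shift.
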
@@ -608,8 +631,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
@@ -743,8 +766,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
@@ -920,12 +942,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -937,6 +959,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
@@ -955,15 +979,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -990,8 +1023,8 @@ enum i40e_filter_program_desc_fd_status {
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+ BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
@@ -1009,14 +1042,17 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1134,6 +1170,8 @@ struct i40e_hw_port_stats {
u64 fd_atr_match;
u64 fd_sb_match;
u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 2d20af290fbf..a7ab463b4474 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
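
A hedged sketch of the handshake from the VF side (send_to_pf() is a hypothetical stand-in for the VF's admin-queue send):

	struct i40e_virtchnl_version_info vvi = {
		.major = I40E_VIRTCHNL_VERSION_MAJOR,	/* 1 */
		.minor = I40E_VIRTCHNL_VERSION_MINOR,	/* 1 */
	};

	send_to_pf(I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi));
	/* If the PF replies with minor == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS,
	 * the VF must follow the 1.0 contract: OP_GET_VF_RESOURCES carries no
	 * capability bitmap and no 1.1-only offloads are negotiated. */
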
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 23f95cdbdfcc..8a7607c6e142 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
**/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
}
/**
@@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
}
tempmap = vecmap->rxq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- linklistmap |= (1 <<
- (I40E_VIRTCHNL_SUPPORTED_QTYPES *
- vsi_queue_id));
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id));
}
tempmap = vecmap->txq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- linklistmap |= (1 <<
- (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
- + 1));
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id + 1));
}
next_q = find_first_bit(&linklistmap,
@@ -337,7 +330,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
reg = (vector_id) |
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
wr32(hw, reg_idx, reg);
}
@@ -542,11 +535,13 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id, true, false);
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
+ true, false);
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF MAC addr\n");
- f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ f = i40e_add_filter(vsi, brdcast,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
if (!f)
dev_info(&pf->pdev->dev,
@@ -835,6 +830,7 @@ complete_reset:
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -899,7 +895,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
for (vf_id = 0; vf_id < tmp; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
}
clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -1123,12 +1119,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
*
* called from the VF to request the API version used by the PF
**/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_version_info info = {
I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
};
+ vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(vf))
+ info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
I40E_SUCCESS, (u8 *)&info,
sizeof(struct
@@ -1143,7 +1143,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
*
* called from the VF to request its resources
**/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
@@ -1167,12 +1167,24 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
len = 0;
goto err;
}
+ if (VF_IS_V11(vf))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
-
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ } else {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1773,9 +1785,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
valid_len = 0;
break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(vf))
+ valid_len = sizeof(u32);
+ else
+ valid_len = 0;
+ break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
break;
@@ -1888,10 +1905,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
- ret = i40e_vc_get_version_msg(vf);
+ ret = i40e_vc_get_version_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- ret = i40e_vc_get_vf_resources_msg(vf);
+ ret = i40e_vc_get_vf_resources_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
@@ -1969,9 +1986,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
- if (reg & (1 << bit_idx)) {
+ if (reg & BIT(bit_idx)) {
/* clear the bit in GLGEN_VFLRSTAT */
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
if (!test_bit(__I40E_DOWN, &pf->state))
i40e_reset_vf(vf, true);
@@ -2023,7 +2040,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ i40e_del_filter(vsi, vf->default_lan_addr.addr,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
/* Delete all the filters for this VSI - we're going to kill it
@@ -2088,6 +2106,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vsi->info.pvid == (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+ /* duplicate request, so just return success */
+ goto error_pvid;
+
if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 09043c1aae54..736f6f08b4f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -42,6 +42,9 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@ struct i40e_vf {
u16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
+ struct i40e_virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
/* VF Port Extender (PE) stag if used */
u16 stag;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e715bccfb5d2..c8022092d369 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -34,8 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
-#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0004
struct i40e_aq_desc {
__le16 flags;
@@ -133,12 +132,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -260,7 +254,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -272,8 +269,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -507,7 +502,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -530,7 +526,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -824,8 +822,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1066,6 +1068,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -2093,6 +2096,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
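
Size check for the key buffer: 0x28 (40-byte standard RSS key) + 0xc (12-byte extended hash key) = 0x34 bytes, which is exactly what I40E_CHECK_STRUCT_LEN asserts at build time. The macro (defined near the top of this header) forces a divide-by-zero in an enum initializer when the sizeof does not match, roughly:

#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
	{ i40e_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
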
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure_A0 {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 39fcb1dc4ea6..023d32d090ce 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -54,6 +54,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_20G_KR2:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
}
/**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
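
Unknown codes fall through to the snprintf() into the 16-byte hw->err_str added to struct i40e_hw earlier in this patch, so the helper always returns a printable string. A typical (illustrative) call site:

	dev_err(&adapter->pdev->dev, "command failed, err %s aq_err %s\n",
		i40evf_stat_str(hw, ret),
		i40evf_aq_str(hw, hw->aq.asq_last_status));
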
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
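These helpers pair naturally when logging admin-queue failures; a typical call, mirroring the driver's own usage later in this patch ('dev', 'hw', and 'ret' assumed from the surrounding context):

	if (ret)
		dev_err(dev, "command failed, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));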
+/**
* i40evf_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -177,6 +392,169 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)lut));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+ status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)key));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+ status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
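A minimal caller sketch for the wrappers above, assuming an initialized 'hw' and a valid 'vsi_id'; the zeroed key only keeps the sketch short, real callers fill it with random bytes:

	struct i40e_aqc_get_set_rss_key_data key = {};
	u8 lut[64];
	int i;

	for (i = 0; i < (int)sizeof(lut); i++)
		lut[i] = i % 4;	/* spread hash buckets over 4 queues */
	i40evf_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
	i40evf_aq_set_rss_key(hw, vsi_id, &key);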
/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 931c88044300..00ed24bfce13 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 58e37a44b80a..55ae4b0f8192 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,6 +60,19 @@ void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 3cc737629bf7..2e2ccc1719b6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -3366,4 +3366,64 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 395f32f226c0..7309479a0764 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -366,15 +366,32 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
**/
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
- I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
- /* allow 00 to be written to the index */
-
- wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
- val);
+ u16 flags = q_vector->tx.ring[0].flags;
+
+ if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ u32 val;
+
+ if (q_vector->arm_wb_state)
+ return;
+
+ val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+ vsi->base_vector - 1),
+ val);
+ q_vector->arm_wb_state = true;
+ } else {
+ u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ }
}
/**
@@ -404,7 +421,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
* 20-1249MB/s bulk (8000 ints/s)
*/
bytes_per_int = rc->total_bytes / rc->itr;
- switch (rc->itr) {
+ switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +434,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
case I40E_BULK_LATENCY:
if (bytes_per_int <= 20)
- rc->latency_range = I40E_LOW_LATENCY;
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ default:
+ if (bytes_per_int <= 20)
+ new_latency_range = I40E_LOW_LATENCY;
break;
}
+ rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
@@ -435,42 +457,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
}
- if (new_itr != rc->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * rc->itr) /
- ((9 * new_itr) + rc->itr);
- rc->itr = new_itr & I40E_MAX_ITR;
- }
+ if (new_itr != rc->itr)
+ rc->itr = new_itr;
rc->total_bytes = 0;
rc->total_packets = 0;
}
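The hunks above re-key the switch on new_latency_range (the old code switched on rc->itr, comparing the latency-range constants against an interval value) and drop the exponential smoothing. A standalone rendering of the resulting transitions, with hypothetical names:

	#include <stdint.h>

	enum latency { LOWEST, LOW, BULK };

	/* Thresholds of 10 and 20 bytes per interval move the vector
	 * between the three latency ranges, as in the code above.
	 */
	static enum latency next_latency(enum latency cur, uint32_t bytes_per_int)
	{
		switch (cur) {
		case LOWEST:
			return bytes_per_int > 10 ? LOW : LOWEST;
		case LOW:
			if (bytes_per_int > 20)
				return BULK;
			return bytes_per_int <= 10 ? LOWEST : LOW;
		default:	/* BULK, or any unexpected value */
			return bytes_per_int <= 20 ? LOW : BULK;
		}
	}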
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
- u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
- struct i40e_hw *hw = &q_vector->vsi->back->hw;
- u32 reg_addr;
- u16 old_itr;
-
- reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr)
- wr32(hw, reg_addr, q_vector->rx.itr);
-
- reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr)
- wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/**
* i40evf_setup_tx_descriptors - Allocate the Tx descriptors
* @tx_ring: the tx ring to set up
*
@@ -873,7 +867,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return;
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
/* both known and outer_ip must be set for the below code to work */
@@ -888,25 +882,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true;
if (ipv4 &&
- (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
- rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1021,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1063,8 +1057,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1110,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1120,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
continue;
}
@@ -1141,7 +1135,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1202,7 +1196,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1220,7 +1214,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1232,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++;
continue;
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
@@ -1262,7 +1256,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1281,6 +1275,67 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
}
/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ u16 old_itr;
+ int vector;
+ u32 val;
+
+ vector = (q_vector->v_idx + vsi->base_vector);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr) {
+ val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_RX_ITR <<
+ I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (q_vector->rx.itr <<
+ I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+ } else {
+ val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+ } else {
+ i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+ }
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr) {
+ val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_TX_ITR <<
+ I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (q_vector->tx.itr <<
+ I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+ } else {
+ val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+ } else {
+ i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+ }
+}
+
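For reference, the DYN_CTLN word assembled above combines an interrupt-enable bit, a PBA-clear bit, an ITR index selecting which interval register to update, and the interval itself; I40E_ITR_NONE in the index field re-enables the vector without touching any interval, which is what the unchanged-ITR branches rely on. A sketch of the Rx composition ('new_itr' assumed):

	u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
		  I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
		  (I40E_RX_ITR << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
		  (new_itr << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);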
+/**
* i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1334,15 +1389,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ q_vector->arm_wb_state = false;
+
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
- ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- i40e_update_dynamic_itr(q_vector);
-
- if (!test_bit(__I40E_DOWN, &vsi->state))
- i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+ i40e_update_enable_itr(vsi, q_vector);
return 0;
}
@@ -1476,11 +1528,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ struct udphdr *oudph;
+ struct iphdr *oiph;
u32 l4_tunnel = 0;
if (skb->encapsulation) {
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
+ oudph = udp_hdr(skb);
+ oiph = ip_hdr(skb);
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
@@ -1519,6 +1575,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
+ if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+ (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
+ (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+ oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+ oiph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, 0);
+ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
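The outer-UDP block above follows the usual checksum-offload convention: software seeds oudph->check with the complemented pseudo-header sum, and the L4T_CS context bit tells the hardware to finish the sum over the payload. For reference, a userspace sketch of the 16-bit one's-complement fold that csum_tcpudp_magic ultimately performs:

	#include <stdint.h>

	/* Fold a 32-bit running sum into a 16-bit one's-complement
	 * checksum; illustrative stand-in for the kernel's csum_fold().
	 */
	static uint16_t csum_fold32(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries once */
		sum = (sum & 0xffff) + (sum >> 16);	/* ...and any new carry */
		return (uint16_t)~sum;
	}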
@@ -1841,6 +1906,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index)))
writel(i, tx_ring->tail);
+ else
+ prefetchw(tx_desc + 1);
return;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e7a34f899f2c..9a30f5d8c089 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -66,17 +66,29 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
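The HENA bitmap selects which packet classifier types feed the RSS hash; it is 64 bits wide (hence BIT_ULL, since several PCTYPE values exceed 31) and is programmed through a pair of 32-bit registers. A usage sketch mirroring the driver code later in this patch ('hw' assumed):

	u64 hena = I40E_DEFAULT_RSS_HENA;

	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);	/* low word first */
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));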
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
@@ -129,16 +141,16 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN BIT(2)
+#define I40E_TX_FLAGS_TSO BIT(3)
+#define I40E_TX_FLAGS_IPV4 BIT(4)
+#define I40E_TX_FLAGS_IPV6 BIT(5)
+#define I40E_TX_FLAGS_FCCRC BIT(6)
+#define I40E_TX_FLAGS_FSO BIT(7)
+#define I40E_TX_FLAGS_FD_SB BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -250,6 +262,10 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
+
/* stats structs */
struct i40e_queue_stats stats;
struct u64_stats_sync syncp;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c463ec41579c..e32dc0b3616d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -47,6 +47,11 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
@@ -120,6 +125,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -213,7 +220,17 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
- bool mfp_mode_1;
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -481,11 +498,13 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
+ char err_str[16];
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -582,19 +601,23 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ /* Note: For non-tunnel packets, INT_UDP_0 is the right status bit
+ * for the UDP header
+ */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -602,8 +625,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
@@ -737,8 +760,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
@@ -914,12 +936,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -931,6 +953,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
@@ -949,15 +973,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -984,8 +1017,8 @@ enum i40e_filter_program_desc_fd_status {
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+ (0x1FFUL << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
@@ -1003,8 +1036,7 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1109,6 +1141,8 @@ struct i40e_hw_port_stats {
u64 fd_atr_match;
u64 fd_sb_match;
u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 59f62f0e65dd..1e89dea0d529 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fea3b75a9a35..3817cbbf45e6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -101,6 +101,8 @@ struct i40e_vsi {
#define MAX_RX_QUEUES 8
#define MAX_TX_QUEUES MAX_RX_QUEUES
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
@@ -115,6 +117,7 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
+ bool arm_wb_state;
cpumask_var_t affinity_mask;
};
@@ -207,33 +210,39 @@ struct i40evf_adapter {
struct msix_entry *msix_entries;
u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
-/* duplcates for common code */
+#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
+#define I40EVF_FLAG_IN_NETPOLL BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
+#define I40EVF_FLAG_RESET_PENDING BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED BIT(10)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
+/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
/* flags for admin queue service task */
u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9)
+#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
/* OS defined structs */
struct net_device *netdev;
@@ -249,8 +258,17 @@ struct i40evf_adapter {
bool netdev_registered;
bool link_up;
enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+ ((_a)->pf_version.minor == 1))
u16 msg_enable;
struct i40e_eth_stats current_stats;
struct i40e_vsi vsi;
@@ -264,6 +282,7 @@ extern const char i40evf_driver_version[];
int i40evf_up(struct i40evf_adapter *adapter);
void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
void i40evf_reset(struct i40evf_adapter *adapter);
void i40evf_set_ethtool_ops(struct net_device *netdev);
void i40evf_update_stats(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 2b53c870e7f1..4790437a50ac 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
@@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
break;
case TCP_V6_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
@@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
default:
return -EINVAL;
@@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
default:
return -EINVAL;
@@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case IPV6_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4ab4ebba07a1..2a6063a3a14d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.2.25"
+#define DRV_VERSION "1.3.2"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -49,6 +49,7 @@ static const char i40evf_copyright[] =
*/
static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
/* required last entry */
{0, }
};
@@ -240,7 +241,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & (1 << (i - 1))) {
+ if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -268,7 +269,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
}
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & (1 << i)) {
+ if (mask & BIT(i)) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -377,7 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
q_vector->num_ringpairs++;
- q_vector->ring_mask |= (1 << t_idx);
+ q_vector->ring_mask |= BIT(t_idx);
}
/**
@@ -406,7 +407,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
/* The ideal configuration...
* We have enough vectors to map one per queue.
*/
- if (q_vectors == (rxr_remaining * 2)) {
+ if (q_vectors >= (rxr_remaining * 2)) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
@@ -892,8 +893,10 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
break;
}
}
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+ found = true;
}
- if (found) {
+ if (!found) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
@@ -1170,6 +1173,113 @@ out:
}
/**
+ * i40evf_configure_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+ struct i40e_aqc_get_set_rss_key_data rss_key;
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_hw *hw = &adapter->hw;
+ int ret = 0, i;
+ u8 *rss_lut;
+
+ if (!vsi->id)
+ return;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ memset(&rss_key, 0, sizeof(rss_key));
+ memcpy(&rss_key, seed, sizeof(rss_key));
+
+ rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL);
+ if (!rss_lut)
+ return;
+
+ /* Populate the LUT with the active queues in round-robin fashion */
+ for (i = 0; i < (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; i++)
+ rss_lut[i] = i % adapter->num_active_queues;
+
+ ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ kfree(rss_lut);
+ return;
+ }
+
+ ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut,
+ (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4);
+ if (ret)
+ dev_err(&adapter->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+
+ kfree(rss_lut);
+}
+
+/**
+ * i40evf_configure_rss_reg - Prepare for RSS using registers
+ * @adapter: board private structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
+ const u8 *seed)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 *seed_dw = (u32 *)seed;
+ u32 cqueue = 0;
+ u32 lut = 0;
+ int i, j;
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
+
+ /* Populate the LUT with the active queues in round-robin fashion */
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ lut = 0;
+ for (j = 0; j < 4; j++) {
+ if (cqueue == adapter->num_active_queues)
+ cqueue = 0;
+ lut |= ((cqueue) << (8 * j));
+ cqueue++;
+ }
+ wr32(hw, I40E_VFQF_HLUT(i), lut);
+ }
+ i40e_flush(hw);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u8 seed[I40EVF_HKEY_ARRAY_SIZE];
+ u64 hena;
+
+ netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+ if (RSS_AQ(adapter))
+ i40evf_configure_rss_aq(&adapter->vsi, seed);
+ else
+ i40evf_configure_rss_reg(adapter, seed);
+}
+
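Both RSS paths above fill the lookup table the same way; a self-contained demonstration of the round-robin fill and the four-entries-per-register packing (hypothetical standalone program, four active queues assumed):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t lut[16];
		uint32_t reg;
		unsigned int i, j, nqueues = 4;

		for (i = 0; i < sizeof(lut); i++)
			lut[i] = i % nqueues;	/* 0,1,2,3,0,1,2,3,... */
		for (i = 0; i < sizeof(lut); i += 4) {
			reg = 0;
			for (j = 0; j < 4; j++)	/* low byte first, as in HLUT */
				reg |= (uint32_t)lut[i + j] << (8 * j);
			printf("0x%08x\n", reg);	/* prints 0x03020100 */
		}
		return 0;
	}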
+/**
* i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
@@ -1369,6 +1479,10 @@ static void i40evf_watchdog_task(struct work_struct *work)
}
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+ i40evf_send_vf_config_msg(adapter);
+ goto watchdog_done;
+ }
if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
i40evf_disable_queues(adapter);
@@ -1410,6 +1524,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+ /* This message goes straight to the firmware, not the
+ * PF, so we don't have to set current_op as we will
+ * not get a response through the ARQ.
+ */
+ i40evf_configure_rss(adapter);
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ goto watchdog_done;
+ }
+
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
@@ -1432,45 +1556,6 @@ restart_watchdog:
schedule_work(&adapter->adminq_task);
}
-/**
- * i40evf_configure_rss - Prepare for RSS
- * @adapter: board private structure
- **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
-{
- u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
- struct i40e_hw *hw = &adapter->hw;
- u32 cqueue = 0;
- u32 lut = 0;
- int i, j;
- u64 hena;
-
- /* Hash type is configured by the PF - we just supply the key */
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
-
- /* Fill out hash function seed */
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
-
- /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- hena = I40E_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
- lut = 0;
- for (j = 0; j < 4; j++) {
- if (cqueue == adapter->num_active_queues)
- cqueue = 0;
- lut |= ((cqueue) << (8 * j));
- cqueue++;
- }
- wr32(hw, I40E_VFQF_HLUT(i), lut);
- }
- i40e_flush(hw);
-}
-
#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
/**
@@ -1604,7 +1689,8 @@ continue_reset:
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
err);
- i40evf_map_queues(adapter);
+ adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -1614,7 +1700,7 @@ continue_reset:
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
f->add = true;
}
- adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
@@ -1856,6 +1942,7 @@ static int i40evf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ i40evf_add_filter(adapter, adapter->hw.mac.addr);
i40evf_configure(adapter);
err = i40evf_up_complete(adapter);
@@ -1979,6 +2066,62 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
}
/**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+ return -ENODEV;
+ }
+
+ if (adapter->vf_res->vf_offload_flags
+ & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ adapter->vsi.netdev = adapter->netdev;
+ return 0;
+}
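Deriving hw_features from features and then masking RXCSUM back out means every enabled offload except receive checksum becomes user-toggleable. A toy model of that masking, with invented flag bits standing in for the NETIF_F_* values:

#include <stdio.h>

#define F_SG     (1u << 0)  /* hypothetical stand-ins for NETIF_F_* bits */
#define F_TSO    (1u << 1)
#define F_RXCSUM (1u << 2)

int main(void)
{
	unsigned int features = F_SG | F_TSO | F_RXCSUM;
	unsigned int hw_features = 0;

	/* Everything enabled becomes user-toggleable... */
	hw_features |= features;
	/* ...except RXCSUM, which this VF keeps under driver control. */
	hw_features &= ~F_RXCSUM;

	printf("features=0x%x hw_features=0x%x\n", features, hw_features);
	return 0;
}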
+
+/**
* i40evf_init_task - worker thread to perform delayed initialization
* @work: pointer to work_struct containing our data
*
@@ -1996,10 +2139,9 @@ static void i40evf_init_task(struct work_struct *work)
struct i40evf_adapter,
init_task.work);
struct net_device *netdev = adapter->netdev;
- struct i40evf_mac_filter *f;
struct i40e_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- int i, err, bufsz;
+ int err, bufsz;
switch (adapter->state) {
case __I40EVF_STARTUP:
@@ -2050,6 +2192,12 @@ static void i40evf_init_task(struct work_struct *work)
if (err) {
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
err = i40evf_send_api_ver(adapter);
+ else
+ dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+ adapter->pf_version.major,
+ adapter->pf_version.minor,
+ I40E_VIRTCHNL_VERSION_MAJOR,
+ I40E_VIRTCHNL_VERSION_MINOR);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2085,42 +2233,15 @@ static void i40evf_init_task(struct work_struct *work)
default:
goto err_alloc;
}
- /* got VF config message back from PF, now we can parse it */
- for (i = 0; i < adapter->vf_res->num_vsis; i++) {
- if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- adapter->vsi_res = &adapter->vf_res->vsi_res[i];
- }
- if (!adapter->vsi_res) {
- dev_err(&pdev->dev, "No LAN VSI found\n");
+ if (i40evf_process_config(adapter))
goto err_alloc;
- }
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netdev->features |= NETIF_F_HIGHDMA |
- NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_GRO;
-
- if (adapter->vf_res->vf_offload_flags
- & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features;
- netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- }
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features &= ~NETIF_F_RXCSUM;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2130,16 +2251,6 @@ static void i40evf_init_task(struct work_struct *work)
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- goto err_sw_init;
-
- ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
- f->add = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
- list_add(&f->list, &adapter->mac_filter_list);
-
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &i40evf_watchdog_timer;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -2154,24 +2265,14 @@ static void i40evf_init_task(struct work_struct *work)
if (err)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
- i40evf_configure_rss(adapter);
+ if (!RSS_AQ(adapter))
+ i40evf_configure_rss(adapter);
err = i40evf_request_misc_irq(adapter);
if (err)
goto err_sw_init;
netif_carrier_off(netdev);
- adapter->vsi.id = adapter->vsi_res->vsi_id;
- adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
- adapter->vsi.back = adapter;
- adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- adapter->vsi.netdev = adapter->netdev;
-
if (!adapter->netdev_registered) {
err = register_netdev(netdev);
if (err)
@@ -2190,6 +2291,13 @@ static void i40evf_init_task(struct work_struct *work)
adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
+
+ if (RSS_AQ(adapter)) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ } else {
+ i40evf_configure_rss(adapter);
+ }
return;
restart:
schedule_delayed_work(&adapter->init_task,
@@ -2299,7 +2407,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &adapter->hw;
hw->back = adapter;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
adapter->state = __I40EVF_STARTUP;
/* Call save state here because it relies on the adapter struct. */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 61e090558f31..d4eb1a5e7d42 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
- dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
- op, err, hw->aq.asq_last_status);
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+ op, i40evf_stat_str(hw, err),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
- if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
- (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ adapter->pf_version = *pf_vvi;
+
+ if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
err = -EIO;
out_alloc:
@@ -145,8 +149,24 @@ out:
**/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
- return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- NULL, 0);
+ u32 caps;
+
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+ caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (PF_IS_V11(adapter))
+ return i40evf_send_pf_msg(adapter,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ (u8 *)&caps, sizeof(caps));
+ else
+ return i40evf_send_pf_msg(adapter,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
}
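The capability word is only attached when the PF speaks the newer virtchnl API; older PFs get the legacy zero-length request. A hedged sketch of that gating, where pf_is_v11() is an assumed stand-in for the driver's PF_IS_V11() macro:

#include <stdbool.h>
#include <stdio.h>

struct version { unsigned int major, minor; };

/* Assumed stand-in for PF_IS_V11(): treat v1.1 as capability-aware. */
static bool pf_is_v11(const struct version *v)
{
	return v->major == 1 && v->minor == 1;
}

int main(void)
{
	struct version pf = { 1, 1 };
	unsigned int caps = 0x1 | 0x2 | 0x4 | 0x8; /* L2, RSS_AQ, RSS_REG, VLAN */

	if (pf_is_v11(&pf))
		printf("send GET_VF_RESOURCES with caps 0x%x\n", caps);
	else
		printf("send legacy GET_VF_RESOURCES with no payload\n");
	return 0;
}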
/**
@@ -274,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+ vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -299,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+ vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -708,8 +728,9 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_retval) {
- dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
- __func__, v_retval, v_opcode);
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
+ __func__, v_retval,
+ i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
}
switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +750,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_stats = *stats;
}
break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+ u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource);
+ memcpy(adapter->vf_res, msg, min(msglen, len));
+ i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+ i40evf_process_config(adapter);
+ }
+ break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
@@ -740,7 +770,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_free_all_rx_resources(adapter);
break;
case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
/* Don't display an error if we get these out of sequence.
* If the firmware needed to get kicked, we'll get these and
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b0182dd31346..d19256994e5c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* reset page to 0 */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
- if (ret_val)
- return ret_val;
if (data & E1000_M88E1112_STATUS_LINK)
port = E1000_MEDIA_PORT_OTHER;
@@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
if (port && (hw->dev_spec._82575.media_port != port)) {
hw->dev_spec._82575.media_port = port;
hw->dev_spec._82575.media_changed = true;
+ }
+
+ if (port == E1000_MEDIA_PORT_COPPER) {
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ igb_check_for_link_82575(hw);
} else {
- ret_val = igb_check_for_link_82575(hw);
+ igb_check_for_link_82575(hw);
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
}
return 0;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c1bb64d8366f..987c9de24764 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,5 +1,5 @@
/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_m88_cable_length_table) / \
- sizeof(e1000_m88_cable_length_table[0]))
static const u16 e1000_igp_2_cable_length_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_igp_2_cable_length_table) / \
- sizeof(e1000_igp_2_cable_length_table[0]))
/**
* igb_check_reset_block - Check if PHY reset is blocked
@@ -1700,7 +1694,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1796,7 +1790,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1840,7 +1834,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
- u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1857,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
IGP02E1000_AGC_LENGTH_MASK;
/* Array index bound check. */
- if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
(cur_agc_index == 0)) {
ret_val = -E1000_ERR_PHY;
goto out;
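The hand-rolled *_CABLE_LENGTH_TABLE_SIZE macros are dropped in favor of the kernel's ARRAY_SIZE(). The same sizeof-based idiom works in plain C, as this sketch of the bounds check shows (table values are illustrative only):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const unsigned short cable_length_table[] = {
	0, 50, 80, 110, 140, 140 };  /* illustrative values only */

int main(void)
{
	unsigned int index = 5;

	/* Bound-check the index before using it, as the driver does. */
	if (index >= ARRAY_SIZE(cable_length_table) - 1)
		printf("index %u out of range\n", index);
	else
		printf("length = %u\n", cable_length_table[index]);
	return 0;
}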
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d5673eb90c54..b7b9c670bb3c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2159,6 +2159,27 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_rx_coalesce ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -ENOTSUPP;
+
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -2396,10 +2417,6 @@ static int igb_get_ts_info(struct net_device *dev,
info->rx_filters |=
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..41e274046896 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -57,8 +57,8 @@
#include "igb.h"
#define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -6621,22 +6621,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
+ unsigned int pull_len;
- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
+ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+ if (likely(size <= IGB_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6651,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes, so that skb_pad() works when skb->len is less than 60.
+ */
+ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
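With igb_pull_tail() gone, the hot path now copies only the protocol headers into the skb's linear area and attaches the remaining payload as a page fragment. A simplified userspace model of that split, using a fixed HDR_BUDGET in place of eth_get_headlen():

#include <stdio.h>
#include <string.h>

#define HDR_BUDGET 64  /* stand-in for IGB_RX_HDR_LEN / eth_get_headlen() */

int main(void)
{
	unsigned char page_buf[256];       /* model of the DMA page fragment */
	unsigned char linear[HDR_BUDGET];  /* model of the skb linear area */
	unsigned int size = 200;           /* bytes written by the NIC */
	unsigned int pull_len;

	memset(page_buf, 0xab, sizeof(page_buf));

	/* Pull only the headers into the linear area... */
	pull_len = size < HDR_BUDGET ? size : HDR_BUDGET;
	memcpy(linear, page_buf, pull_len);

	/* ...and leave the rest in place, attached as a tail frag. */
	printf("linear: %u bytes, frag: %u bytes at offset %u\n",
	       pull_len, size - pull_len, pull_len);
	return 0;
}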
@@ -6791,62 +6807,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
}
/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
- *
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- /* retrieve timestamp from buffer */
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
- /* update pointers to remove timestamp header */
- skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
- frag->page_offset += IGB_TS_HDR_LEN;
- skb->data_len -= IGB_TS_HDR_LEN;
- skb->len -= IGB_TS_HDR_LEN;
-
- /* move va to start of packet data */
- va += IGB_TS_HDR_LEN;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* igb_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -6873,10 +6833,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
}
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- igb_pull_tail(rx_ring, rx_desc, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 6b87d9634614..b1e364d26aa7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1394,14 +1394,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
/*
* Continue setup of fdirctrl register bits:
* Turn perfect match filtering on
- * Report hash in RSS field of Rx wb descriptor
* Initialize the drop queue
* Move the flexible bytes to use the ethertype - shift 6 words
* Set the maximum length per hash bucket to 0xA filters
* Send interrupt when 64 (0x4 * 16) filters are left
*/
fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
- IXGBE_FDIRCTRL_REPORT_STATUS |
(IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
(0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
(0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index ec7b2324b77b..f7aeb560a504 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2938,14 +2938,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..3e6a9319c718 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1360,14 +1360,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
}
#endif /* CONFIG_IXGBE_DCA */
+
+#define IXGBE_RSS_L4_TYPES_MASK \
+ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (ring->netdev->features & NETIF_F_RXHASH)
- skb_set_hash(skb,
- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
- PKT_HASH_TYPE_L3);
+ u16 rss_type;
+
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+ IXGBE_RXDADV_RSSTYPE_MASK;
+
+ if (!rss_type)
+ return;
+
+ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
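A single bitmask test classifies the descriptor's RSS type as an L4 or L3 hash. The sketch below replays that test over all type values, reusing the numeric RSS-type codes from the ixgbevf defines added later in this series:

#include <stdio.h>

/* Numeric RSS-type codes, as in the ixgbevf defines below. */
#define RSSTYPE_IPV4_TCP 0x1
#define RSSTYPE_IPV6_TCP 0x3
#define RSSTYPE_IPV4_UDP 0x7
#define RSSTYPE_IPV6_UDP 0x8

#define RSS_L4_TYPES_MASK \
	((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP) | \
	 (1ul << RSSTYPE_IPV6_TCP) | (1ul << RSSTYPE_IPV6_UDP))

int main(void)
{
	unsigned int rss_type;

	for (rss_type = 0; rss_type <= 9; rss_type++) {
		if (!rss_type) {
			printf("type %u: hash not set\n", rss_type);
			continue;
		}
		printf("type %u: %s hash\n", rss_type,
		       (RSS_L4_TYPES_MASK & (1ul << rss_type)) ? "L4" : "L3");
	}
	return 0;
}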
#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 770e21a64388..58434584b16d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
#define IXGBE_RXDADV_SPH 0x8000
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
IXGBE_RXD_ERR_LE | \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index b2f5b161d792..d3e5f5b37999 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- /* We support this operation only for 82599 and x540 at the moment */
- if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
- return IXGBEVF_82599_RETA_SIZE;
+ if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+ return IXGBEVF_X550_VFRETA_SIZE;
- return 0;
+ return IXGBEVF_82599_RETA_SIZE;
}
static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- /* We support this operation only for 82599 and x540 at the moment */
- if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
- return IXGBEVF_RSS_HASH_KEY_SIZE;
-
- return 0;
+ return IXGBEVF_RSS_HASH_KEY_SIZE;
}
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
- /* If neither indirection table nor hash key was requested - just
- * return a success avoiding taking any locks.
- */
- if (!indir && !key)
- return 0;
+ if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+ if (key)
+ memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
- spin_lock_bh(&adapter->mbx_lock);
- if (indir)
- err = ixgbevf_get_reta_locked(&adapter->hw, indir,
- adapter->num_rx_queues);
+ if (indir) {
+ int i;
- if (!err && key)
- err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+ for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+ }
+ } else {
+ /* If neither the indirection table nor the hash key was
+ * requested, just return success without taking any locks.
+ */
+ if (!indir && !key)
+ return 0;
- spin_unlock_bh(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
+ if (indir)
+ err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+ adapter->num_rx_queues);
+
+ if (!err && key)
+ err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+ }
return err;
}
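The design choice here is to serve X550 VF queries from adapter-cached state instead of a mailbox round trip to the PF. A sketch of that split, with stand-in types for the adapter and the locked mailbox query:

#include <stdint.h>
#include <stdio.h>

#define RETA_SIZE 64  /* as IXGBEVF_X550_VFRETA_SIZE */

struct vf_adapter {
	int has_cached_rss;                /* true on X550-class VFs (model) */
	uint8_t rss_indir_tbl[RETA_SIZE];
};

/* Stand-in for the locked PF-mailbox query used on older parts. */
static int query_reta_via_mailbox(uint32_t *indir, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		indir[i] = i % 2;
	return 0;
}

int main(void)
{
	struct vf_adapter ad = { .has_cached_rss = 1 };
	uint32_t indir[RETA_SIZE];
	int i;

	for (i = 0; i < RETA_SIZE; i++)
		ad.rss_indir_tbl[i] = i % 2;

	if (ad.has_cached_rss) {
		for (i = 0; i < RETA_SIZE; i++)
			indir[i] = ad.rss_indir_tbl[i]; /* no mailbox, no lock */
	} else {
		query_reta_via_mailbox(indir, RETA_SIZE);
	}
	printf("indir[0]=%u indir[1]=%u\n", indir[0], indir[1]);
	return 0;
}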
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 775d08900949..04c7ec8446e0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -144,9 +144,11 @@ struct ixgbevf_ring {
#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES 2
-#define IXGBEVF_82599_RETA_SIZE 128
+#define IXGBEVF_MAX_RSS_QUEUES 2
+#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */
#define IXGBEVF_RSS_HASH_KEY_SIZE 40
+#define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -447,6 +449,9 @@ struct ixgbevf_adapter {
spinlock_t mbx_lock;
unsigned long last_reset;
+
+ u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+ u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
};
enum ixbgevf_state_t {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..88298a3ef942 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
+#define IXGBE_RSS_L4_TYPES_MASK \
+ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u16 rss_type;
+
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+ IXGBE_RXDADV_RSSTYPE_MASK;
+
+ if (!rss_type)
+ return;
+
+ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: structure containing ring specific data
@@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ ixgbevf_rx_hash(rx_ring, rx_desc, skb);
ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -649,46 +676,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
}
/**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* ixgbevf_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
}
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ixgbevf_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
+ unsigned int pull_len;
- if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
+ if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes, so that skb_pad() works when skb->len is less than 60.
+ */
+ pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
/* avoid re-using remote pages */
if (unlikely(ixgbevf_page_is_reserved(page)))
@@ -1697,22 +1696,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vfmrqc = 0, vfreta = 0;
- u32 rss_key[10];
u16 rss_i = adapter->num_rx_queues;
- int i, j;
+ u8 i, j;
/* Fill out hash function seeds */
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+ netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+ for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
- /* Fill out redirection table */
- for (i = 0, j = 0; i < 64; i++, j++) {
+ for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
if (j == rss_i)
j = 0;
- vfreta = (vfreta << 8) | (j * 0x1);
- if ((i & 3) == 3)
+
+ adapter->rss_indir_tbl[i] = j;
+
+ vfreta |= j << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+ vfreta = 0;
+ }
}
/* Perform hash on these packet types */
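Each VFRETA register packs four 8-bit redirection entries, so the loop shifts the queue index into byte (i & 3) and writes and clears the accumulator every fourth entry. A compact sketch of the packing:

#include <stdio.h>

int main(void)
{
	const unsigned int reta_size = 64; /* IXGBEVF_X550_VFRETA_SIZE */
	const unsigned int rss_i = 2;      /* active RSS queues */
	unsigned int vfreta = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < reta_size; i++, j++) {
		if (j == rss_i)
			j = 0;
		vfreta |= j << (i & 0x3) * 8;  /* entry i into byte (i & 3) */
		if ((i & 3) == 3) {
			printf("VFRETA[%2u] = 0x%08x\n", i >> 2, vfreta);
			vfreta = 0;  /* start the next register clean */
		}
	}
	return 0;
}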
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 62e48bc0cb23..fe2299ac4f5c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3027,8 +3027,8 @@ static int mvneta_probe(struct platform_device *pdev)
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
+ const char *managed;
int phy_mode;
- int fixed_phy = 0;
int err;
/* Our multiqueue support is not complete, so for now, only
@@ -3062,7 +3062,6 @@ static int mvneta_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot register fixed PHY\n");
goto err_free_irq;
}
- fixed_phy = 1;
/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
@@ -3086,8 +3085,10 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
- pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
- fixed_phy;
+
+ err = of_property_read_string(dn, "managed", &managed);
+ pp->use_inband_status = (err == 0 &&
+ strcmp(managed, "in-band-status") == 0);
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 52a6665b7abf..d54701047401 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 38fe32ef5e5f..2e2a5ec509ac 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 99ba1c50e585..f79d8124321e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
"blueflame",
+ "phv-bit"
};
static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+ bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+ bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
int i;
+ int ret = 0;
- if (bf_enabled_new == bf_enabled_old)
- return 0; /* Nothing to do */
+ if (bf_enabled_new != bf_enabled_old) {
+ if (bf_enabled_new) {
+ bool bf_supported = true;
- if (bf_enabled_new) {
- bool bf_supported = true;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ bf_supported &= priv->tx_ring[i]->bf_alloced;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ if (!bf_supported) {
+ en_err(priv, "BlueFlame is not supported\n");
+ return -EINVAL;
+ }
- if (!bf_supported) {
- en_err(priv, "BlueFlame is not supported\n");
- return -EINVAL;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+ } else {
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- } else {
- priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- }
-
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ priv->tx_ring[i]->bf_enabled = bf_enabled_new;
- en_info(priv, "BlueFlame %s\n",
- bf_enabled_new ? "Enabled" : "Disabled");
+ en_info(priv, "BlueFlame %s\n",
+ bf_enabled_new ? "Enabled" : "Disabled");
+ }
+ if (phv_enabled_new != phv_enabled_old) {
+ ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+ if (ret)
+ return ret;
+ else if (phv_enabled_new)
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ else
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+ en_info(priv, "PHV bit %s\n",
+ phv_enabled_new ? "Enabled" : "Disabled");
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd1ce12..4726122ea76b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ /* Since there is no support for separate RX C-TAG/S-TAG VLAN accel
+ * enable/disable, make sure the S-TAG flag is always in the same
+ * state as the C-TAG flag.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+ !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+ en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+ (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
en_info(priv, "Turn %s loopback\n",
(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ }
+
+ if (mlx4_is_slave(mdev->dev)) {
+ int phv;
+
+ err = get_phv_bit(mdev->dev, port, &phv);
+ if (!err && phv) {
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ }
+ } else {
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c145dddd717..4402a1e48c9b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -725,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
- if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
!(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
hdr += sizeof(struct vlan_hdr);
@@ -906,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->csum_level = 1;
if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+ } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ __vlan_hwaccel_put_tag(gro_skb,
+ htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
}
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(gro_skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
skb_mark_napi_id(gro_skb, &cq->napi);
@@ -962,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK) &&
+ MLX4_CQE_CVLAN_PRESENT_MASK) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+ else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1065,7 +1080,10 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ /* VLAN_HLEN is added twice, to support skbs VLAN-tagged with multiple
+ * headers (for example, ETH_P_8021Q and ETH_P_8021AD).
+ */
+ int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
int buf_size = 0;
int i = 0;
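With the second VLAN header counted, a standard 1500-byte MTU yields an effective frame budget of 1500 + 14 + 2*4 = 1522 bytes. A one-line check of the arithmetic:

#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN  4

int main(void)
{
	int mtu = 1500;
	/* Double VLAN_HLEN covers stacked C-TAG + S-TAG (QinQ) headers. */
	int eff_mtu = mtu + ETH_HLEN + 2 * VLAN_HLEN;

	printf("eff_mtu = %d\n", eff_mtu);  /* prints 1522 */
	return 0;
}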
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f6ad96..494e7762fdb1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
+ u16 vlan_proto = 0;
int i_frag;
int lso_header_size;
void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = skb_vlan_tag_get(skb);
-
+ vlan_proto = be16_to_cpu(skb->vlan_proto);
+ }
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -958,8 +960,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->bf.offset ^= ring->bf.buf_size;
} else {
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!skb_vlan_tag_present(skb);
+ if (vlan_proto == ETH_P_8021AD)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+ else if (vlan_proto == ETH_P_8021Q)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+
tx_desc->ctrl.fence_size = real_size;
/* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57ad7a1..e8ec1dec5789 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[26] = "Port ETS Scheduler support",
[27] = "Port beacon support",
[28] = "RX-ALL support",
+ [29] = "802.1ad offload support",
};
int i;
@@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT 0x40
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ if (dev->caps.phv_bit[port]) {
+ field = QUERY_FUNC_CAP_PHV_BIT;
+ MLX4_PUT(outbox->buf, field,
+ QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ }
+
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
@@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
if (field & (1 << 2))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+ if (field & 0x40)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* phv_check enable */
+ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+ if (byte_field & 0x2)
+ param->phv_check_en = 1;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
}
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID 0x10
+#define SET_PORT_GEN_PHV_EN 0x80
+
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+
+ context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+ if (phv_bit)
+ context->phv_en |= SET_PORT_GEN_PHV_EN;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+ int err;
+ struct mlx4_func_cap func_cap;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+ int ret;
+
+ if (mlx4_is_slave(dev))
+ return -EPERM;
+
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+ if (!ret)
+ dev->caps.phv_bit[port] = new_val;
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
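set_phv_bit() succeeds only on a PF whose firmware advertises PHV_EN and does not skip the outer VLAN. A sketch of that capability gating, with invented flag bits and plain negative returns standing in for the errno values:

#include <stdio.h>

#define CAP_PHV_EN          (1u << 0)  /* hypothetical stand-ins for flags2 bits */
#define CAP_SKIP_OUTER_VLAN (1u << 1)

static int set_phv_bit_model(unsigned int caps, int is_slave, int new_val)
{
	if (is_slave)
		return -1;  /* like -EPERM: VFs may not flip the port setting */
	if ((caps & CAP_PHV_EN) && !(caps & CAP_SKIP_OUTER_VLAN))
		return 0;   /* would issue SET_PORT and cache new_val */
	return -2;          /* like -EOPNOTSUPP */
}

int main(void)
{
	printf("PF with PHV_EN: %d\n", set_phv_bit_model(CAP_PHV_EN, 0, 1));
	printf("VF: %d\n", set_phv_bit_model(CAP_PHV_EN, 1, 1));
	printf("skip-outer-vlan: %d\n",
	       set_phv_bit_model(CAP_PHV_EN | CAP_SKIP_OUTER_VLAN, 0, 1));
	return 0;
}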
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c2461ad..08de5555c2f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
u16 cqe_size; /* For use only when CQE stride feature enabled */
u16 eqe_size; /* For use only when EQE stride feature enabled */
u8 rss_ip_frags;
+ u8 phv_check_en; /* for QUERY_HCA */
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 29c2a017a450..121c579888bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+ struct mlx4_init_hca_param hca_param;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ /* Turn off the PHV_EN flag in case phv_check_en is set.
+ * phv_check_en is a HW check that parses the packet and verifies
+ * that the phv bit was reported correctly in the wqe. To allow
+ * QinQ, the PHV_EN flag should be set and phv_check_en must be
+ * cleared; otherwise QinQ packets will be dropped by the HW.
+ */
+ if (err || hca_param.phv_check_en)
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+ }
+
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -2912,6 +2927,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
{
u64 dev_flags = dev->flags;
int err = 0;
+ int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+ MLX4_MAX_NUM_VF);
if (reset_flow) {
dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2937,6 +2954,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
}
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ if (total_vfs > fw_enabled_sriov_vfs) {
+ mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
+ total_vfs, fw_enabled_sriov_vfs);
+ err = -ENOMEM;
+ goto disable_sriov;
+ }
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
@@ -3418,20 +3441,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
goto err_disable_pdev;
}
}
- if (total_vfs >= MLX4_MAX_NUM_VF) {
+ if (total_vfs > MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) than allowed (%d)\n",
- total_vfs, MLX4_MAX_NUM_VF - 1);
+ "Requested more VF's (%d) than allowed by hw (%d)\n",
+ total_vfs, MLX4_MAX_NUM_VF);
err = -EINVAL;
goto err_disable_pdev;
}
for (i = 0; i < MLX4_MAX_PORTS; i++) {
- if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+ if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+ "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
nvfs[i] + nvfs[2], i + 1,
- MLX4_MAX_NUM_VF_P_PORT - 1);
+ MLX4_MAX_NUM_VF_P_PORT);
err = -EINVAL;
goto err_disable_pdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a092c5c34d43..232b2b55f23b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -787,6 +787,9 @@ struct mlx4_set_port_general_context {
u8 pprx;
u8 pfcrx;
u16 reserved4;
+ u32 reserved5;
+ u8 phv_en;
+ u8 reserved6[3];
};
struct mlx4_set_port_rqp_calc_context {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d1669eb52..defcf8c395bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
*/
#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV 2
#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 0715b497511f..6cb38304669f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
* register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+ size_t size, dma_addr_t *dma_handle,
+ int node)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ int original_node;
+ void *cpu_handle;
+
+ mutex_lock(&priv->alloc_mutex);
+ original_node = dev_to_node(&dev->pdev->dev);
+ set_dev_node(&dev->pdev->dev, node);
+ cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+ dma_handle, GFP_KERNEL);
+ set_dev_node(&dev->pdev->dev, original_node);
+ mutex_unlock(&priv->alloc_mutex);
+ return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node)
{
dma_addr_t t;
buf->size = size;
buf->npages = 1;
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
+ buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
+ &t, node);
if (!buf->direct.buf)
return -ENOMEM;
@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
return 0;
}
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+ return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+ int node)
{
struct mlx5_db_pgdir *pgdir;
@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
return NULL;
bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
- pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, GFP_KERNEL);
+
+ pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+ &pgdir->db_dma, node);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
return 0;
}
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
struct mlx5_db_pgdir *pgdir;
int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
if (!mlx5_alloc_db_from_pgdir(pgdir, db))
goto out;
- pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+ pgdir = mlx5_alloc_db_pgdir(dev, node);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -145,6 +171,12 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
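
dma_zalloc_coherent() takes no NUMA node argument, so the helper above temporarily rewrites the struct device's node under priv->alloc_mutex, allocates, and restores it; the mutex is needed because set_dev_node() mutates state shared by all allocators on the device. An illustrative caller (hypothetical function, real symbols from the hunk above) placing a queue buffer on the node of its servicing CPU:

	/* Illustrative only: allocate a buffer on the NUMA node of the
	 * CPU that will service the queue. */
	static int alloc_queue_buf(struct mlx5_core_dev *dev, struct mlx5_buf *buf,
				   int size, int cpu)
	{
		return mlx5_buf_alloc_node(dev, size, buf, cpu_to_node(cpu));
	}
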
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d23bd657e3c..e9d7d90363a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -60,6 +60,7 @@
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET 16
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
@@ -137,6 +138,80 @@ struct mlx5e_vport_stats {
#define NUM_VPORT_COUNTERS 31
};
+static const char pport_strings[][ETH_GSTRING_LEN] = {
+ /* IEEE802.3 counters */
+ "frames_tx",
+ "frames_rx",
+ "check_seq_err",
+ "alignment_err",
+ "octets_tx",
+ "octets_received",
+ "multicast_xmitted",
+ "broadcast_xmitted",
+ "multicast_rx",
+ "broadcast_rx",
+ "in_range_len_errors",
+ "out_of_range_len",
+ "too_long_errors",
+ "symbol_err",
+ "mac_control_tx",
+ "mac_control_rx",
+ "unsupported_op_rx",
+ "pause_ctrl_rx",
+ "pause_ctrl_tx",
+
+ /* RFC2863 counters */
+ "in_octets",
+ "in_ucast_pkts",
+ "in_discards",
+ "in_errors",
+ "in_unknown_protos",
+ "out_octets",
+ "out_ucast_pkts",
+ "out_discards",
+ "out_errors",
+ "in_multicast_pkts",
+ "in_broadcast_pkts",
+ "out_multicast_pkts",
+ "out_broadcast_pkts",
+
+ /* RFC2819 counters */
+ "drop_events",
+ "octets",
+ "pkts",
+ "broadcast_pkts",
+ "multicast_pkts",
+ "crc_align_errors",
+ "undersize_pkts",
+ "oversize_pkts",
+ "fragments",
+ "jabbers",
+ "collisions",
+ "p64octets",
+ "p65to127octets",
+ "p128to255octets",
+ "p256to511octets",
+ "p512to1023octets",
+ "p1024to1518octets",
+ "p1519to2047octets",
+ "p2048to4095octets",
+ "p4096to8191octets",
+ "p8192to10239octets",
+};
+
+#define NUM_IEEE_802_3_COUNTERS 19
+#define NUM_RFC_2863_COUNTERS 13
+#define NUM_RFC_2819_COUNTERS 21
+#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
+ NUM_RFC_2863_COUNTERS + \
+ NUM_RFC_2819_COUNTERS)
+
+struct mlx5e_pport_stats {
+ __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
+ __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
+ __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
+};
+
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
"csum_none",
@@ -179,6 +254,7 @@ struct mlx5e_sq_stats {
struct mlx5e_stats {
struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
};
struct mlx5e_params {
@@ -195,6 +271,8 @@ struct mlx5e_params {
u16 rx_hash_log_tbl_sz;
bool lro_en;
u32 lro_wqe_sz;
+ u8 rss_hfunc;
+ u16 tx_max_inline;
};
enum {
@@ -214,6 +292,7 @@ struct mlx5e_cq {
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -237,6 +316,7 @@ struct mlx5e_rq {
struct mlx5_wq_ctrl wq_ctrl;
u32 rqn;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
struct mlx5e_tx_skb_cb {
@@ -266,7 +346,9 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
- u32 bf_offset;
+ u16 bf_offset;
+ u16 prev_cc;
+ u8 bf_budget;
struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -279,9 +361,10 @@ struct mlx5e_sq {
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
void __iomem *uar_map;
+ void __iomem *uar_bf_map;
struct netdev_queue *txq;
u32 sqn;
- u32 bf_buf_size;
+ u16 bf_buf_size;
u16 max_inline;
u16 edge;
struct device *pdev;
@@ -324,20 +407,24 @@ struct mlx5e_channel {
};
enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP = 0,
- MLX5E_TT_IPV6_TCP = 1,
- MLX5E_TT_IPV4_UDP = 2,
- MLX5E_TT_IPV6_UDP = 3,
- MLX5E_TT_IPV4 = 4,
- MLX5E_TT_IPV6 = 5,
- MLX5E_TT_ANY = 6,
- MLX5E_NUM_TT = 7,
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
};
-enum {
- MLX5E_RQT_SPREADING = 0,
- MLX5E_RQT_DEFAULT_RQ = 1,
- MLX5E_NUM_RQT = 2,
+enum mlx5e_rqt_ix {
+ MLX5E_INDIRECTION_RQT,
+ MLX5E_SINGLE_RQ_RQT,
+ MLX5E_NUM_RQT,
};
struct mlx5e_eth_addr_info {
@@ -362,10 +449,10 @@ struct mlx5e_eth_addr_db {
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLE,
MLX5E_STATE_OPENED,
+ MLX5E_STATE_DESTROYING,
};
struct mlx5e_vlan_db {
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 active_vlans_ft_ix[VLAN_N_VID];
u32 untagged_rule_ft_ix;
u32 any_vlan_rule_ft_ix;
@@ -379,7 +466,6 @@ struct mlx5e_flow_table {
struct mlx5e_priv {
/* priv data path fields - start */
- int num_tc;
int default_vlan_prio;
struct mlx5e_sq **txq_to_sq_map;
/* priv data path fields - end */
@@ -390,10 +476,11 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mr mr;
+ struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 rqtn;
+ u32 rqtn[MLX5E_NUM_RQT];
u32 tirn[MLX5E_NUM_TT];
struct mlx5e_flow_table ft;
@@ -470,10 +557,9 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_open_flow_table(struct mlx5e_priv *priv);
-void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -482,17 +568,15 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe)
+ struct mlx5e_tx_wqe *wqe, int bf_sz)
{
+ u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -503,9 +587,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
- mlx5_write64((__be32 *)&wqe->ctrl,
- sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
- NULL);
+ if (bf_sz) {
+ __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+ /* flush the write-combining mapped buffer */
+ wmb();
+
+ } else {
+ mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ }
sq->bf_offset ^= sq->bf_buf_size;
}
@@ -519,3 +609,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
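
mlx5e_tx_notify_hw() now takes a bf_sz argument: nonzero means the WQE control segment is copied straight into the BlueFlame write-combining buffer via __iowrite64_copy() (sparing the device a doorbell-triggered DMA read), zero falls back to the plain 64-bit doorbell write; `bf_offset ^= bf_buf_size` ping-pongs between the two BF half-buffers. A sketch of how a transmit path might choose bf_sz, assuming the bf_budget field added above (hypothetical caller, not the patch's xmit code):

	/* Hypothetical bf_sz selection on the xmit path. */
	static int mlx5e_pick_bf_sz(struct mlx5e_sq *sq, int wqe_bytes,
				    bool skb_has_frags)
	{
		if (sq->bf_budget && !skb_has_frags &&
		    wqe_bytes <= sq->bf_buf_size) {
			sq->bf_budget--;
			return wqe_bytes;	/* BlueFlame: copy WQE inline */
		}
		return 0;			/* regular doorbell write */
	}
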
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 388938482ff9..b549797b315f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,9 +171,9 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_COUNTERS +
+ return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
priv->params.num_channels * NUM_RQ_STATS +
- priv->params.num_channels * priv->num_tc *
+ priv->params.num_channels * priv->params.num_tc *
NUM_SQ_STATS;
/* fallthrough */
default:
@@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev,
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vport_strings[i]);
+ /* PPORT counters */
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_strings[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -207,7 +212,7 @@ static void mlx5e_get_strings(struct net_device *dev,
"rx%d_%s", i, rq_stats_strings[j]);
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data +
(idx++) * ETH_GSTRING_LEN,
@@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
data[idx++] = ((u64 *)&priv->stats.vport)[i];
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -242,7 +250,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
((u64 *)&priv->channel[i]->rq.stats)[j];
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] = !test_bit(MLX5E_STATE_OPENED,
&priv->state) ? 0 :
@@ -264,7 +272,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_params new_params;
+ bool was_opened;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
@@ -316,11 +324,18 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.log_rq_size = log_rq_size;
- new_params.log_sq_size = log_sq_size;
- new_params.min_rx_wqes = min_rx_wqes;
- err = mlx5e_update_priv_params(priv, &new_params);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.log_rq_size = log_rq_size;
+ priv->params.log_sq_size = log_sq_size;
+ priv->params.min_rx_wqes = min_rx_wqes;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -342,7 +357,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
unsigned int count = ch->combined_count;
- struct mlx5e_params new_params;
+ bool was_opened;
int err = 0;
if (!count) {
@@ -365,9 +380,16 @@ static int mlx5e_set_channels(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.num_channels = count;
- err = mlx5e_update_priv_params(priv, &new_params);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.num_channels = count;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
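
Both ethtool handlers above replace the removed mlx5e_update_priv_params() with an open-coded "close if open, mutate priv->params, reopen" sequence under state_lock. A minimal sketch of that pattern as a reusable helper (hypothetical; the patch deliberately open-codes it at each call site):

	/* Hypothetical helper capturing the reconfigure pattern above.
	 * Caller must hold priv->state_lock. */
	static int mlx5e_reconfigure(struct mlx5e_priv *priv,
				     void (*apply)(struct mlx5e_priv *priv))
	{
		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
		int err = 0;

		if (was_opened)
			mlx5e_close_locked(priv->netdev);
		apply(priv);
		if (was_opened)
			err = mlx5e_open_locked(priv->netdev);
		return err;
	}
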
@@ -662,6 +684,101 @@ out:
return err;
}
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (hfunc)
+ *hfunc = priv->params.rss_hfunc;
+
+ return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+ return 0;
+
+ if ((hfunc != ETH_RSS_HASH_XOR) &&
+ (hfunc != ETH_RSS_HASH_TOP))
+ return -EINVAL;
+
+ mutex_lock(&priv->state_lock);
+
+ priv->params.rss_hfunc = hfunc;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_close_locked(dev);
+ err = mlx5e_open_locked(dev);
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ const struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ *(u32 *)data = priv->params.tx_max_inline;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
+ u32 val;
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val > mlx5e_get_max_inline_cap(mdev)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.tx_max_inline = val;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
+ mutex_unlock(&priv->state_lock);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -676,4 +793,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_coalesce = mlx5e_set_coalesce,
.get_settings = mlx5e_get_settings,
.set_settings = mlx5e_set_settings,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_tunable = mlx5e_get_tunable,
+ .set_tunable = mlx5e_set_tunable,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 120db80c47aa..e71563ce05d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
{
void *ft = priv->ft.main;
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
- if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ if (ai->tt_vec & BIT(MLX5E_TT_ANY))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}
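
The open-coded (1 << tt) masks above become BIT(tt); the kernel macro (from <linux/bitops.h>, historically defined as below) makes the shift unsigned long and reads more clearly in mask tables:

	#define BIT(nr) (1UL << (nr))
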
@@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
switch (eth_addr_type) {
case MLX5E_UC:
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
case MLX5E_MC_IPV4:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV4) |
0;
break;
case MLX5E_MC_IPV6:
ret =
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV6) |
0;
break;
case MLX5E_MC_OTHER:
ret =
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
case MLX5E_ALLMULTI:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
default: /* MLX5E_PROMISC */
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
u8 *match_criteria_dmac;
void *ft = priv->ft.main;
u32 *tirn = priv->tirn;
+ u32 *ft_ix;
u32 tt_vec;
int err;
@@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
- if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+ if (tt_vec & BIT(MLX5E_TT_ANY)) {
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_ANY]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_ANY]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_ANY);
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.ethertype);
- if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+ if (tt_vec & BIT(MLX5E_TT_IPV4)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+ if (tt_vec & BIT(MLX5E_TT_IPV6)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6);
}
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_UDP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
}
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_TCP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_AH);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_ESP);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
}
return 0;
+
+err_del_ai:
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+ return err;
}
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
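
In the hunks that follow, the repeated "if (err) { delete; return err; }" blocks collapse into a single err_del_ai label; because mlx5e_del_eth_addr_from_flow_table() only tears down entries whose bit is set in ai->tt_vec, a partial failure unwinds exactly what was installed. A condensed sketch of the idiom, with hypothetical add_one_rule()/del_rules() helpers:

	/* Sketch: record each success in a bitmask; on failure delete
	 * only what the bitmask says was actually created. */
	static int add_rules(unsigned long want, unsigned long *done)
	{
		int tt, err;

		for_each_set_bit(tt, &want, MLX5E_NUM_TT) {
			err = add_one_rule(tt);		/* hypothetical */
			if (err)
				goto err_unwind;
			__set_bit(tt, done);
		}
		return 0;

	err_unwind:
		del_rules(*done);			/* hypothetical */
		return err;
	}
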
@@ -498,44 +594,28 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (!priv->vlan.filter_disabled)
+ return;
- if (priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = false;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = false;
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (priv->vlan.filter_disabled)
+ return;
- if (!priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = true;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = true;
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int err = 0;
-
- mutex_lock(&priv->state_lock);
-
- set_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
- mutex_unlock(&priv->state_lock);
-
- return err;
+ return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -543,56 +623,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mutex_lock(&priv->state_lock);
-
- clear_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
- mutex_unlock(&priv->state_lock);
-
- return 0;
-}
-
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
- int err;
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
- if (err)
- return err;
- }
-
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- return err;
-
- if (priv->vlan.filter_disabled) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- if (err)
- return err;
- }
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
return 0;
}
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
-
- if (priv->vlan.filter_disabled)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -656,18 +691,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5e_sync_netdev_addr(priv);
mlx5e_apply_netdev_addr(priv);
}
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+void mlx5e_set_rx_mode_work(struct work_struct *work)
{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
struct net_device *ndev = priv->netdev;
- bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
@@ -700,17 +738,6 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
ea->broadcast_enabled = broadcast_enabled;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
-
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_set_rx_mode_core(priv);
- mutex_unlock(&priv->state_lock);
-}
-
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -725,7 +752,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
if (!g)
return -ENOMEM;
- g[0].log_sz = 2;
+ g[0].log_sz = 3;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
outer_headers.ethertype);
@@ -833,7 +860,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
mlx5_destroy_flow_table(priv->ft.vlan);
}
-int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
int err;
@@ -845,16 +872,24 @@ int mlx5e_open_flow_table(struct mlx5e_priv *priv)
if (err)
goto err_destroy_main_flow_table;
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ goto err_destroy_vlan_flow_table;
+
return 0;
+err_destroy_vlan_flow_table:
+ mlx5e_destroy_vlan_flow_table(priv);
+
err_destroy_main_flow_table:
mlx5e_destroy_main_flow_table(priv);
return err;
}
-void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_vlan_flow_table(priv);
mlx5e_destroy_main_flow_table(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 40206da1f9d7..111427b33ec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@ struct mlx5e_rq_param {
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
+ u16 max_inline;
};
struct mlx5e_cq_param {
@@ -81,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *s = &priv->stats.pport;
+ u32 *in;
+ u32 *out;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ in = mlx5_vzalloc(sz);
+ out = mlx5_vzalloc(sz);
+ if (!in || !out)
+ goto free_out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->IEEE_802_3_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->IEEE_802_3_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2863_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2863_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2819_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2819_counters));
+
+free_out:
+ kvfree(in);
+ kvfree(out);
+}
+
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
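
The three PPCNT queries above differ only in the counter group id and the destination array; a table-driven variant would remove the repetition. A hedged sketch of such a refactor (not in the patch; reuses the register names shown above):

	/* Hypothetical factoring of the repeated PPCNT queries. */
	static void mlx5e_query_pport_group(struct mlx5_core_dev *mdev,
					    u32 *in, u32 *out, int sz,
					    int grp_id, void *dst, size_t dst_sz)
	{
		MLX5_SET(ppcnt_reg, in, grp, grp_id);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
		memcpy(dst, MLX5_ADDR_OF(ppcnt_reg, out, counter_set), dst_sz);
	}
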
@@ -116,7 +158,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->rx_csum_none += rq_stats->csum_none;
s->rx_wqe_err += rq_stats->wqe_err;
- for (j = 0; j < priv->num_tc; j++) {
+ for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tso_packets += sq_stats->tso_packets;
@@ -201,6 +243,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->tx_csum_offload = s->tx_packets - tx_offload_none;
s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+ mlx5e_update_pport_counters(priv);
free_out:
kvfree(out);
}
@@ -272,6 +315,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
int err;
int i;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
@@ -304,6 +349,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->channel = c;
rq->ix = c->ix;
+ rq->priv = c->priv;
return 0;
@@ -321,8 +367,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = rq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
@@ -342,11 +387,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
memcpy(rqc, param->rqc, sizeof(param->rqc));
- MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
mlx5_fill_page_array(&rq->wq_ctrl.buf,
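
The log_wq_pg_sz change above (repeated below for the SQ and CQ) fixes a real bug: the device expects the page-size log relative to its own 4 KB adapter page (MLX5_ADAPTER_PAGE_SHIFT == 12), not the host's PAGE_SHIFT. On 4 KB-page hosts the two coincide, but on a 64 KB-page host the old code mis-programs the field:

	/* Example: host with 64 KB pages, buffer page_shift = 16.
	 *   wrong:  16 - PAGE_SHIFT (16)              = 0 -> device assumes 4 KB pages
	 *   right:  16 - MLX5_ADAPTER_PAGE_SHIFT (12) = 4 -> device sees 64 KB pages
	 */
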
@@ -389,11 +434,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_rq(mdev, rq->rqn);
+ mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -502,6 +543,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
return err;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
&sq->wq_ctrl);
if (err)
@@ -509,7 +552,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
+ sq->uar_bf_map = sq->uar.bf_map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+ sq->max_inline = param->max_inline;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
@@ -518,11 +563,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
txq_ix = c->ix + tc * priv->params.num_channels;
sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
- sq->pdev = c->pdev;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->bf_budget = MLX5E_SQ_BF_BUDGET;
priv->txq_to_sq_map[txq_ix] = sq;
return 0;
@@ -569,7 +615,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, user_index, sq->tc);
MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
@@ -579,7 +624,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +747,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
int err;
u32 i;
- param->wq.numa = cpu_to_node(c->cpu);
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -732,6 +778,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
}
cq->channel = c;
+ cq->priv = priv;
return 0;
}
@@ -743,8 +790,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
@@ -773,7 +819,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -790,8 +836,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -929,7 +974,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mr.key);
- c->num_tc = priv->num_tc;
+ c->num_tc = priv->params.num_tc;
mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
@@ -1000,7 +1045,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
}
@@ -1014,7 +1059,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1105,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
struct mlx5e_channel_param cparam;
+ int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
int j;
- priv->channel = kcalloc(priv->params.num_channels,
- sizeof(struct mlx5e_channel *), GFP_KERNEL);
+ priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+ GFP_KERNEL);
- priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+ priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
if (!priv->channel || !priv->txq_to_sq_map)
goto err_free_txq_to_sq_map;
mlx5e_build_channel_param(priv, &cparam);
- for (i = 0; i < priv->params.num_channels; i++) {
+ for (i = 0; i < nch; i++) {
err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
- for (j = 0; j < priv->params.num_channels; j++) {
+ for (j = 0; j < nch; j++) {
err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
if (err)
goto err_close_channels;
@@ -1109,67 +1156,71 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
kfree(priv->channel);
}
-static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_rx_hash_fn(int hfunc)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
- void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
- memset(in, 0, sizeof(in));
-
- MLX5_SET(tisc, tisc, prio, tc);
- MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
-
- return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
-}
-
-static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
-{
- mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+ return (hfunc == ETH_RSS_HASH_TOP) ?
+ MLX5_RX_HASH_FN_TOEPLITZ :
+ MLX5_RX_HASH_FN_INVERTED_XOR8;
}
-static int mlx5e_open_tises(struct mlx5e_priv *priv)
+static int mlx5e_bits_invert(unsigned long a, int size)
{
- int num_tc = priv->num_tc;
- int err;
- int tc;
-
- for (tc = 0; tc < num_tc; tc++) {
- err = mlx5e_open_tis(priv, tc);
- if (err)
- goto err_close_tises;
- }
-
- return 0;
+ int inv = 0;
+ int i;
-err_close_tises:
- for (tc--; tc >= 0; tc--)
- mlx5e_close_tis(priv, tc);
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
- return err;
+ return inv;
}
-static void mlx5e_close_tises(struct mlx5e_priv *priv)
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
+ enum mlx5e_rqt_ix rqt_ix)
{
- int num_tc = priv->num_tc;
- int tc;
+ int i;
+ int log_sz;
+
+ switch (rqt_ix) {
+ case MLX5E_INDIRECTION_RQT:
+ log_sz = priv->params.rx_hash_log_tbl_sz;
+ for (i = 0; i < (1 << log_sz); i++) {
+ int ix = i;
+
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, log_sz);
+
+ ix = ix % priv->params.num_channels;
+ MLX5_SET(rqtc, rqtc, rq_num[i],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn);
+ }
- for (tc = 0; tc < num_tc; tc++)
- mlx5e_close_tis(priv, tc);
+ break;
+
+ default: /* MLX5E_SINGLE_RQ_RQT */
+ MLX5_SET(rqtc, rqtc, rq_num[0],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[0]->rq.rqn :
+ priv->drop_rq.rqn);
+
+ break;
+ }
}
-static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
void *rqtc;
int inlen;
- int err;
+ int log_sz;
int sz;
- int i;
+ int err;
- sz = 1 << priv->params.rx_hash_log_tbl_sz;
+ log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
+ priv->params.rx_hash_log_tbl_sz;
+ sz = 1 << log_sz;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
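
mlx5e_bits_invert() (added in the hunk above) mirrors the low `size` bits of an index; with the inverted-XOR8 hash this spreads consecutive indirection-table entries across channels instead of mapping long runs of entries to the same RQ. A worked example for size = 3:

	/* mlx5e_bits_invert(a, 3) reverses the 3 low bits:
	 *   0 (000) -> 0 (000)    1 (001) -> 4 (100)
	 *   2 (010) -> 2 (010)    3 (011) -> 6 (110)
	 *   5 (101) -> 5 (101)    6 (110) -> 3 (011)
	 */
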
@@ -1181,198 +1232,104 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- for (i = 0; i < sz; i++) {
- int ix = i % priv->params.num_channels;
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
- MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
- }
-
- MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
- if (!err)
- priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+ err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
kvfree(in);
return err;
}
-static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *rqtc;
+ int inlen;
+ int log_sz;
+ int sz;
+ int err;
- memset(in, 0, sizeof(in));
+ log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
+ priv->params.rx_hash_log_tbl_sz;
+ sz = 1 << log_sz;
- MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
- MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
- mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
- sizeof(out));
-}
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
-{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
+ MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
- if (priv->params.lro_en) {
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (priv->params.lro_wqe_sz -
- ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[3]));
- }
+ err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
- switch (tt) {
- case MLX5E_TT_ANY:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn,
- priv->channel[0]->rq.rqn);
- break;
- default:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn);
- MLX5_SET(tirc, tirc, rx_hash_fn,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key),
- MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key));
- break;
- }
+ kvfree(in);
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+ return err;
+}
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+{
+ mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
+}
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+{
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+ if (!priv->params.lro_en)
+ return;
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- }
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[3]));
}
-static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
+
+ void *in;
void *tirc;
int inlen;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+ tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- mlx5e_build_tir_ctx(priv, tirc, tt);
+ mlx5e_build_tir_ctx_lro(tirc, priv);
- err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+ err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
-{
- mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
-}
-
-static int mlx5e_open_tirs(struct mlx5e_priv *priv)
-{
- int err;
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_open_tir(priv, i);
- if (err)
- goto err_close_tirs;
- }
-
- return 0;
-
-err_close_tirs:
- for (i--; i >= 0; i--)
- mlx5e_close_tir(priv, i);
-
- return err;
-}
-
-static void mlx5e_close_tirs(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++)
- mlx5e_close_tir(priv, i);
-}
-
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
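
In the hunk above, mlx5e_fill_rqt_rqns() picks a real channel RQ while the device is open and the drop RQ otherwise, so TIRs never reference a destroyed RQ; mlx5e_redirect_rqts() re-runs the fill through MODIFY_RQT on both open and close. The per-entry selection condenses to the following (hypothetical helper, same logic as the ternary above):

	/* Hypothetical condensation of the rqn selection. */
	static u32 mlx5e_rqn_or_drop(struct mlx5e_priv *priv, int ix)
	{
		return test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		       priv->channel[ix]->rq.rqn : priv->drop_rq.rqn;
	}
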
@@ -1400,6 +1357,8 @@ int mlx5e_open_locked(struct net_device *netdev)
int num_txqs;
int err;
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1408,74 +1367,19 @@ int mlx5e_open_locked(struct net_device *netdev)
if (err)
return err;
- err = mlx5e_open_tises(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
- __func__, err);
- return err;
- }
-
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
__func__, err);
- goto err_close_tises;
- }
-
- err = mlx5e_open_rqt(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
- __func__, err);
- goto err_close_channels;
- }
-
- err = mlx5e_open_tirs(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
- __func__, err);
- goto err_close_rqls;
- }
-
- err = mlx5e_open_flow_table(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
- __func__, err);
- goto err_close_tirs;
- }
-
- err = mlx5e_add_all_vlan_rules(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
- __func__, err);
- goto err_close_flow_table;
+ return err;
}
- mlx5e_init_eth_addr(priv);
-
- set_bit(MLX5E_STATE_OPENED, &priv->state);
-
mlx5e_update_carrier(priv);
- mlx5e_set_rx_mode_core(priv);
+ mlx5e_redirect_rqts(priv);
schedule_delayed_work(&priv->update_stats_work, 0);
- return 0;
-
-err_close_flow_table:
- mlx5e_close_flow_table(priv);
-
-err_close_tirs:
- mlx5e_close_tirs(priv);
-
-err_close_rqls:
- mlx5e_close_rqt(priv);
-
-err_close_channels:
- mlx5e_close_channels(priv);
-
-err_close_tises:
- mlx5e_close_tises(priv);
- return err;
+ return 0;
}
static int mlx5e_open(struct net_device *netdev)
@@ -1496,14 +1400,9 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_set_rx_mode_core(priv);
- mlx5e_del_all_vlan_rules(priv);
+ mlx5e_redirect_rqts(priv);
netif_carrier_off(priv->netdev);
- mlx5e_close_flow_table(priv);
- mlx5e_close_tirs(priv);
- mlx5e_close_rqt(priv);
mlx5e_close_channels(priv);
- mlx5e_close_tises(priv);
return 0;
}
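
The teardown ordering in mlx5e_close_locked() above is deliberate, as the following summary (derived from the hunk, not new code) spells out:

	/* Teardown ordering, as implemented above:
	 *   clear_bit(MLX5E_STATE_OPENED) -> fill_rqt_rqns() now picks drop_rq
	 *   mlx5e_redirect_rqts()         -> RQTs stop referencing channel RQs
	 *   mlx5e_close_channels()        -> channel RQs can be destroyed safely
	 */
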
@@ -1520,26 +1419,341 @@ static int mlx5e_close(struct net_device *netdev)
return err;
}
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params)
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+ struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- int err = 0;
- int was_opened;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int err;
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ param->wq.db_numa_node = param->wq.buf_numa_node;
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
- priv->params = *new_params;
+ rq->priv = priv;
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+ struct mlx5e_cq *cq,
+ struct mlx5e_cq_param *param)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
+ cq->priv = priv;
+
+ return 0;
+}
+
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+ struct mlx5e_cq_param cq_param;
+ struct mlx5e_rq_param rq_param;
+ struct mlx5e_rq *rq = &priv->drop_rq;
+ struct mlx5e_cq *cq = &priv->drop_rq.cq;
+ int err;
+
+ memset(&cq_param, 0, sizeof(cq_param));
+ memset(&rq_param, 0, sizeof(rq_param));
+ mlx5e_build_rx_cq_param(priv, &cq_param);
+ mlx5e_build_rq_param(priv, &rq_param);
+
+ err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, &cq_param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+ if (err)
+ goto err_disable_cq;
+
+ err = mlx5e_enable_rq(rq, &rq_param);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+
+ return err;
+}
+
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+ mlx5e_disable_rq(&priv->drop_rq);
+ mlx5e_destroy_rq(&priv->drop_rq);
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++) {
+ err = mlx5e_create_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ mlx5e_destroy_tis(priv, tc);
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_INDIRECTION_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ netdev_rss_key_fill(rss_key, len);
+ }
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_create_tir(priv, i);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_destroy_tir(priv, i);
return err;
}
+
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_destroy_tir(priv, i);
+}
+
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
@@ -1589,20 +1803,26 @@ static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
netdev_features_t changes = features ^ netdev->features;
- struct mlx5e_params new_params;
- bool update_params = false;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
if (changes & NETIF_F_LRO) {
- new_params.lro_en = !!(features & NETIF_F_LRO);
- update_params = true;
+ bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.lro_en = !!(features & NETIF_F_LRO);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
}
- if (update_params)
- mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1611,8 +1831,6 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_disable_vlan_filter(priv);
}
- mutex_unlock(&priv->state_lock);
-
return 0;
}
@@ -1620,8 +1838,9 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
int max_mtu;
- int err;
+ int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -1633,8 +1852,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
}
mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(netdev);
+
netdev->mtu = new_mtu;
- err = mlx5e_update_priv_params(priv, &priv->params);
+
+ if (was_opened)
+ err = mlx5e_open_locked(netdev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -1673,6 +1900,15 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+ int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ return bf_buf_size -
+ sizeof(struct mlx5e_tx_wqe) +
+ 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
+
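For intuition, a minimal user-space re-derivation of the capability math above. The log_bf_reg_size value and the WQE size are hypothetical stand-ins (the driver reads the former from device capabilities and uses sizeof(struct mlx5e_tx_wqe) for the latter):

	#include <stdio.h>

	#define LOG_BF_REG_SIZE	9	/* hypothetical HCA capability */
	#define TX_WQE_SIZE	48	/* placeholder for sizeof(struct mlx5e_tx_wqe) */

	int main(void)
	{
		/* the blue flame register is split into two alternating buffers */
		int bf_buf_size = (1 << LOG_BF_REG_SIZE) / 2;
		/* +2 re-adds the 2-byte inline_hdr_start, which is part of the
		 * WQE yet also carries inline header data */
		int max_inline = bf_buf_size - TX_WQE_SIZE + 2;

		printf("tx_max_inline = %d bytes\n", max_inline);
		return 0;
	}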
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_comp_vectors)
@@ -1691,6 +1927,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.rx_hash_log_tbl_sz =
@@ -1700,6 +1937,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
+ priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
priv->params.lro_wqe_sz =
@@ -1708,7 +1946,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->mdev = mdev;
priv->netdev = netdev;
priv->params.num_channels = num_comp_vectors;
- priv->num_tc = priv->params.num_tc;
priv->default_vlan_prio = priv->params.default_vlan_prio;
spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1970,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
- if (priv->num_tc > 1) {
+ if (priv->params.num_tc > 1)
mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
- }
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->watchdog_timeo = 15 * HZ;
@@ -1819,43 +2055,95 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
goto err_free_netdev;
}
err = mlx5_core_alloc_pd(mdev, &priv->pdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
goto err_unmap_free_uar;
}
err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc td failed, %d\n", err);
goto err_dealloc_pd;
}
err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
- netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "create mkey failed, %d\n", err);
goto err_dealloc_transport_domain;
}
- err = register_netdev(netdev);
+ err = mlx5e_create_tises(priv);
if (err) {
- netdev_err(netdev, "%s: register_netdev failed, %d\n",
- __func__, err);
+ mlx5_core_warn(mdev, "create tises failed, %d\n", err);
goto err_destroy_mkey;
}
+ err = mlx5e_open_drop_rq(priv);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_destroy_tises;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+ goto err_close_drop_rq;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
+ goto err_destroy_rqt_indir;
+ }
+
+ err = mlx5e_create_tirs(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
+ goto err_destroy_rqt_single;
+ }
+
+ err = mlx5e_create_flow_tables(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ err = register_netdev(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+ goto err_destroy_flow_tables;
+ }
+
mlx5e_enable_async_events(priv);
+ schedule_work(&priv->set_rx_mode_work);
return priv;
+err_destroy_flow_tables:
+ mlx5e_destroy_flow_tables(priv);
+
+err_destroy_tirs:
+ mlx5e_destroy_tirs(priv);
+
+err_destroy_rqt_single:
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_destroy_rqt_indir:
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+ mlx5e_close_drop_rq(priv);
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mr);
@@ -1879,13 +2167,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
struct net_device *netdev = priv->netdev;
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+ schedule_work(&priv->set_rx_mode_work);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
unregister_netdev(netdev);
+ mlx5e_destroy_flow_tables(priv);
+ mlx5e_destroy_tirs(priv);
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_close_drop_rq(priv);
+ mlx5e_destroy_tises(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
- mlx5e_disable_async_events(priv);
- flush_scheduled_work();
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 03f28f438e55..64380bc0cd6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, 0);
}
}
@@ -110,9 +110,17 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool bf)
{
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ /* Some NIC TX decisions, e.g. loopback, are based on the packet
+ * headers and occur before the data gather.
+ * Therefore, these headers must be copied into the WQE.
+ */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+ if (bf && (skb_headlen(skb) <= sq->max_inline))
+ return skb_headlen(skb);
+
return MLX5E_MIN_INLINE;
}
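Taken together with the bf/budget checks in mlx5e_sq_xmit below, the inline-size policy can be restated as a small stand-alone predicate. This is an editorial sketch, not driver code; the parameter names are illustrative:

	#include <stdbool.h>

	/* A send may inline the whole linear part (enabling a blue flame
	 * doorbell) only when the doorbell is about to be rung, budget
	 * remains, and the skb is small and unfragmented; otherwise only
	 * the minimal L2 header (ETH_HLEN + 2) is copied into the WQE. */
	static bool bf_inline_eligible(int bf_budget, bool xmit_more,
				       unsigned int nr_frags,
				       unsigned int headlen,
				       unsigned int max_inline)
	{
		return bf_budget > 0 && !xmit_more && nr_frags == 0 &&
		       headlen <= max_inline;
	}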
@@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u8 opcode = MLX5_OPCODE_SEND;
dma_addr_t dma_addr = 0;
+ bool bf = false;
u16 headlen;
u16 ds_cnt;
u16 ihs;
@@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
else
sq->stats.csum_offload_none++;
+ if (sq->cc != sq->prev_cc) {
+ sq->prev_cc = sq->cc;
+ sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+ }
+
if (skb_is_gso(skb)) {
u32 payload_len;
@@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_packets++;
sq->stats.tso_bytes += payload_len;
} else {
- ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ bf = sq->bf_budget &&
+ !skb->xmit_more &&
+ !skb_shinfo(skb)->nr_frags;
+ ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
ETH_ZLEN);
}
@@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+ int bf_sz = 0;
+
+ if (bf && sq->uar_bf_map)
+ bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
+ sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
sq->stats.packets++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 06e3e1e54c35..03aabdd79abe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -457,7 +457,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int numa_node = dev_to_node(&mdev->pdev->dev);
+ int numa_node = priv->numa_node;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -656,6 +656,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
}
#endif
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+ resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+ resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+ dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+ return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+ if (dev->priv.bf_mapping)
+ io_mapping_free(dev->priv.bf_mapping);
+}
+
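The write-combining mapping created here is consumed per-UAR in mlx5_alloc_map_uar() further down. A minimal sketch of the generic io-mapping lifecycle these two files rely on (bar, len and offset are placeholders):

	#include <linux/io-mapping.h>

	static void io_mapping_demo(resource_size_t bar, unsigned long len,
				    unsigned long offset)
	{
		struct io_mapping *m;
		void __iomem *p;

		m = io_mapping_create_wc(bar, len);	/* whole BAR, WC attribute */
		if (!m)
			return;

		p = io_mapping_map_wc(m, offset);	/* map one slice of the BAR */
		/* ... write-combined doorbell/blue-flame writes through p ... */
		io_mapping_unmap(p);

		io_mapping_free(m);
	}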
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -670,6 +686,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock);
+ mutex_init(&priv->alloc_mutex);
+
+ priv->numa_node = dev_to_node(&dev->pdev->dev);
+
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
if (!priv->dbg_root)
return -ENOMEM;
@@ -806,10 +826,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ if (map_bf_area(dev))
+ dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
err = mlx5_irq_set_affinity_hints(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_free_comp_eqs;
+ goto err_unmap_bf_area;
}
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -821,7 +844,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
-err_free_comp_eqs:
+err_unmap_bf_area:
+ unmap_bf_area(dev);
+
free_comp_eqs(dev);
err_stop_eqs:
@@ -879,6 +904,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
+ unmap_bf_area(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc88ecaecb4b..566a70488db1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
int in_size, u32 *out,
int out_size)
{
- mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ int err;
+
+ err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ if (err)
+ return err;
+
return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 8d98b03026d5..b4c87c7b0cf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -163,6 +163,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+
+ MLX5_SET(modify_tir_in, in, tirn, tirn);
+ MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
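A caller of mlx5_core_modify_tir() is expected to build a modify_tir_in mailbox first. A hedged sketch of such a caller, along the lines of the mlx5e_modify_tir_lro() usage referenced earlier; the bitmask.lro field name is an assumption taken from that usage, not defined in this hunk:

	static int modify_tir_lro_sketch(struct mlx5_core_dev *mdev, u32 tirn)
	{
		u32 *in;
		void *tirc;
		int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
		int err;

		in = mlx5_vzalloc(inlen);
		if (!in)
			return -ENOMEM;

		/* select which TIR context fields the command should touch */
		MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
		tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
		/* ... fill the LRO fields of tirc here ... */

		err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
		kvfree(in);
		return err;
	}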
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -358,3 +370,44 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ int err;
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ return err;
+}
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
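For completeness, a sketch of how a create_rqt mailbox might be assembled before calling mlx5_core_create_rqt(). The rqt_context field names (rqt_actual_size, rqt_max_size, rq_num) are assumptions based on typical mlx5 ifc layouts and are not defined in this hunk:

	static int create_rqt_sketch(struct mlx5_core_dev *mdev, u32 *rqns,
				     int sz, u32 *rqtn)
	{
		u32 *in;
		void *rqtc;
		int inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
		int i;
		int err;

		in = mlx5_vzalloc(inlen);
		if (!in)
			return -ENOMEM;

		rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
		MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
		MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
		for (i = 0; i < sz; i++)
			MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);

		err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
		kvfree(in);
		return err;
	}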
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index f9ef244710d5..74cae51436e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn);
@@ -61,4 +63,10 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 9ef85873ceea..eb05c845ece9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
goto err_free_uar;
}
+ if (mdev->priv.bf_mapping)
+ uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+ uar->index << PAGE_SHIFT);
+
return 0;
err_free_uar:
@@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
+ io_mapping_unmap(uar->bf_map);
iounmap(uar->map);
mlx5_cmd_free_uar(mdev, uar->index);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 8388411582cf..ce21ee5b2357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
wq->sz_m1 = (1 << wq->log_sz) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e0ddd69fb429..6c2a8f95093c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
struct mlx5_wq_param {
int linear;
- int numa;
+ int buf_numa_node;
+ int db_numa_node;
};
struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 000000000000..2941d9c5ae48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+ tristate "Mellanox Technologies Switch ASICs support"
+ ---help---
+ This driver supports the Mellanox Technologies Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_core.
+
+config MLXSW_PCI
+ tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+ depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+ default m
+ ---help---
+ This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+ tristate "Mellanox Technologies SwitchX-2 support"
+ depends on MLXSW_CORE && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports Mellanox Technologies SwitchX-2 Ethernet
+ Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_switchx2.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 000000000000..0a05f65ee814
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
+mlxsw_core-objs := core.o
+obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
+mlxsw_pci-objs := pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
+mlxsw_switchx2-objs := switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 000000000000..770db17eb03f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE 4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+ return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+ kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+ memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod, char *in_mbox,
+ size_t in_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod,
+ bool out_mbox_direct,
+ char *out_mbox, size_t out_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+ out_mbox_direct, NULL, 0,
+ out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ NULL, 0, NULL, 0);
+}
+
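The three wrappers above share a common mailbox lifecycle. A minimal sketch using only helpers defined in this file (QUERY_FW's opcode appears in the enum just below):

	static int mbox_lifecycle_sketch(struct mlxsw_core *mlxsw_core)
	{
		char *out_mbox;
		int err;

		out_mbox = mlxsw_cmd_mbox_alloc();	/* kzalloc'd, 4KB */
		if (!out_mbox)
			return -ENOMEM;

		/* fire a command that returns data through the out mailbox */
		err = mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
					 0, 0, false, out_mbox,
					 MLXSW_CMD_MBOX_SIZE);

		/* ... parse out_mbox on success ... */

		mlxsw_cmd_mbox_free(out_mbox);
		return err;
	}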
+enum mlxsw_cmd_opcode {
+ MLXSW_CMD_OPCODE_QUERY_FW = 0x004,
+ MLXSW_CMD_OPCODE_QUERY_BOARDINFO = 0x006,
+ MLXSW_CMD_OPCODE_QUERY_AQ_CAP = 0x003,
+ MLXSW_CMD_OPCODE_MAP_FA = 0xFFF,
+ MLXSW_CMD_OPCODE_UNMAP_FA = 0xFFE,
+ MLXSW_CMD_OPCODE_CONFIG_PROFILE = 0x100,
+ MLXSW_CMD_OPCODE_ACCESS_REG = 0x040,
+ MLXSW_CMD_OPCODE_SW2HW_DQ = 0x201,
+ MLXSW_CMD_OPCODE_HW2SW_DQ = 0x202,
+ MLXSW_CMD_OPCODE_2ERR_DQ = 0x01E,
+ MLXSW_CMD_OPCODE_QUERY_DQ = 0x022,
+ MLXSW_CMD_OPCODE_SW2HW_CQ = 0x016,
+ MLXSW_CMD_OPCODE_HW2SW_CQ = 0x017,
+ MLXSW_CMD_OPCODE_QUERY_CQ = 0x018,
+ MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
+ MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
+ MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+ switch (opcode) {
+ case MLXSW_CMD_OPCODE_QUERY_FW:
+ return "QUERY_FW";
+ case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+ return "QUERY_BOARDINFO";
+ case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+ return "QUERY_AQ_CAP";
+ case MLXSW_CMD_OPCODE_MAP_FA:
+ return "MAP_FA";
+ case MLXSW_CMD_OPCODE_UNMAP_FA:
+ return "UNMAP_FA";
+ case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+ return "CONFIG_PROFILE";
+ case MLXSW_CMD_OPCODE_ACCESS_REG:
+ return "ACCESS_REG";
+ case MLXSW_CMD_OPCODE_SW2HW_DQ:
+ return "SW2HW_DQ";
+ case MLXSW_CMD_OPCODE_HW2SW_DQ:
+ return "HW2SW_DQ";
+ case MLXSW_CMD_OPCODE_2ERR_DQ:
+ return "2ERR_DQ";
+ case MLXSW_CMD_OPCODE_QUERY_DQ:
+ return "QUERY_DQ";
+ case MLXSW_CMD_OPCODE_SW2HW_CQ:
+ return "SW2HW_CQ";
+ case MLXSW_CMD_OPCODE_HW2SW_CQ:
+ return "HW2SW_CQ";
+ case MLXSW_CMD_OPCODE_QUERY_CQ:
+ return "QUERY_CQ";
+ case MLXSW_CMD_OPCODE_SW2HW_EQ:
+ return "SW2HW_EQ";
+ case MLXSW_CMD_OPCODE_HW2SW_EQ:
+ return "HW2SW_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_EQ:
+ return "QUERY_EQ";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum mlxsw_cmd_status {
+ /* Command execution succeeded. */
+ MLXSW_CMD_STATUS_OK = 0x00,
+ /* Internal error (e.g. bus error) occurred while processing command. */
+ MLXSW_CMD_STATUS_INTERNAL_ERR = 0x01,
+ /* Operation/command not supported or opcode modifier not supported. */
+ MLXSW_CMD_STATUS_BAD_OP = 0x02,
+ /* Parameter not supported, parameter out of range. */
+ MLXSW_CMD_STATUS_BAD_PARAM = 0x03,
+ /* System was not enabled or bad system state. */
+ MLXSW_CMD_STATUS_BAD_SYS_STATE = 0x04,
+ /* Attempt to access reserved or unallocated resource, or resource in
+ * inappropriate ownership.
+ */
+ MLXSW_CMD_STATUS_BAD_RESOURCE = 0x05,
+ /* Requested resource is currently executing a command. */
+ MLXSW_CMD_STATUS_RESOURCE_BUSY = 0x06,
+ /* Required capability exceeds device limits. */
+ MLXSW_CMD_STATUS_EXCEED_LIM = 0x08,
+ /* Resource is not in the appropriate state or ownership. */
+ MLXSW_CMD_STATUS_BAD_RES_STATE = 0x09,
+ /* Index out of range (might be beyond table size or attempt to
+ * access a reserved resource).
+ */
+ MLXSW_CMD_STATUS_BAD_INDEX = 0x0A,
+ /* NVMEM checksum/CRC failed. */
+ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B,
+ /* Bad management packet (silently discarded). */
+ MLXSW_CMD_STATUS_BAD_PKT = 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_CMD_STATUS_OK:
+ return "OK";
+ case MLXSW_CMD_STATUS_INTERNAL_ERR:
+ return "INTERNAL_ERR";
+ case MLXSW_CMD_STATUS_BAD_OP:
+ return "BAD_OP";
+ case MLXSW_CMD_STATUS_BAD_PARAM:
+ return "BAD_PARAM";
+ case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+ return "BAD_SYS_STATE";
+ case MLXSW_CMD_STATUS_BAD_RESOURCE:
+ return "BAD_RESOURCE";
+ case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+ return "RESOURCE_BUSY";
+ case MLXSW_CMD_STATUS_EXCEED_LIM:
+ return "EXCEED_LIM";
+ case MLXSW_CMD_STATUS_BAD_RES_STATE:
+ return "BAD_RES_STATE";
+ case MLXSW_CMD_STATUS_BAD_INDEX:
+ return "BAD_INDEX";
+ case MLXSW_CMD_STATUS_BAD_NVMEM:
+ return "BAD_NVMEM";
+ case MLXSW_CMD_STATUS_BAD_PKT:
+ return "BAD_PKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is done for the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports; offset
+ * from the error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+
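Reading any of the fields above goes through the accessors that the MLXSW_ITEM32/MLXSW_ITEM64 macros generate. A sketch (the exact _get accessor names are an assumption, since item.h is not part of this hunk):

	static void print_fw_rev_sketch(struct mlxsw_core *mlxsw_core)
	{
		char *mbox = mlxsw_cmd_mbox_alloc();

		if (!mbox)
			return;

		if (!mlxsw_cmd_query_fw(mlxsw_core, mbox))
			pr_info("fw rev %u.%u.%u\n",
				mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox),
				mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox),
				mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox));

		mlxsw_cmd_mbox_free(mbox);
	}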
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum number of S/G list elements in an SDQ. An SDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum number of S/G list elements in an RDQ. An RDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 vpm_entries_count)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+ 0, vpm_entries_count,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
+
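A sketch of a single MAP_FA round: fill one VPM entry with a pinned region's physical address and its log2 size, then pass vpm_entries_count == 1. The indexed _set accessors are the ones the MLXSW_ITEM*_INDEXED macros above would generate (assumed names):

	static int map_fa_one_sketch(struct mlxsw_core *mlxsw_core,
				     dma_addr_t pa, u8 log2size)
	{
		char *in_mbox = mlxsw_cmd_mbox_alloc();
		int err;

		if (!in_mbox)
			return -ENOMEM;

		mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, 0, pa);
		mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, 0, log2size);

		err = mlxsw_cmd_map_fa(mlxsw_core, in_mbox, 1);

		mlxsw_cmd_mbox_free(in_mbox);
		return err;
	}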
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps the entire
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be done prior to executing the
+ * MAP_FA command.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an indeterminate
+ * state; a software reset must therefore be performed on the device following
+ * an unsuccessful completion of the command. A software reset is likewise
+ * required in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+ char *in_mbox)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+ 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
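Profile programming is opt-in per field: a capability bit selects which mailbox value the firmware consumes. A hedged sketch using the set_max_lag/max_lag items defined below (the _set accessor names are assumed, as generated by MLXSW_ITEM32):

	static int config_profile_sketch(struct mlxsw_core *mlxsw_core)
	{
		char *mbox = mlxsw_cmd_mbox_alloc();
		int err;

		if (!mbox)
			return -ENOMEM;

		/* only fields whose set_* bit is 1 are applied */
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox, 64);

		err = mlxsw_cmd_config_profile_set(mlxsw_core, mbox);

		mlxsw_cmd_mbox_free(mbox);
		return err;
	}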
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated to
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_flood_mode
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ * the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the
+ * configuration value for the Switch Partition is taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+ 0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+ 0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+ 0x60, 0, 8, 0x08, 0x00, false);
+
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, char *out_mbox)
+{
+ return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+ 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+ opcode_mod, dq_number,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+ MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+ MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
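Putting the SW2HW_DQ pieces together for a send queue (values are illustrative; the _set accessor names are assumed from the items above):

	static int sw2hw_sdq_sketch(struct mlxsw_core *mlxsw_core, u32 sdq_num,
				    u8 cq_num, dma_addr_t ring_pa)
	{
		char *mbox = mlxsw_cmd_mbox_alloc();
		int err;

		if (!mbox)
			return -ENOMEM;

		mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
		mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3); /* CPU egress tclass */
		mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 0); /* one 4KB page */
		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, 0, ring_pa);

		err = mlxsw_cmd_sw2hw_sdq(mlxsw_core, mbox, sdq_num);

		mlxsw_cmd_mbox_free(mbox);
		return err;
	}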
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from whatever
+ * state it was in previously. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+ opcode_mod, dq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
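+
+/* Illustrative sketch, not used by the driver: since the QUERY_DQ output
+ * mailbox shares the SW2HW_DQ layout, the sw2hw_dq getters above can decode
+ * it, e.g. to read back an SDQ's size. The helper name is hypothetical.
+ */
+static inline int mlxsw_cmd_query_sdq_log2_sz(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 *p_log2_sz)
+{
+ char *out_mbox;
+ int err;
+
+ out_mbox = mlxsw_cmd_mbox_alloc();
+ if (!out_mbox)
+ return -ENOMEM;
+ err = mlxsw_cmd_query_sdq(mlxsw_core, out_mbox, dq_number);
+ if (!err)
+ *p_log2_sz = mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_get(out_mbox);
+ mlxsw_cmd_mbox_free(out_mbox);
+ return err;
+}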
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+ 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled: updates of the CQ consumer
+ * counter (poll for completion) or request completion notification
+ * (arm CQ) doorbells should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
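+
+/* Illustrative sketch, not used by the driver: bringing a CQ under hardware
+ * ownership means packing the CQ context with the setters above and issuing
+ * SW2HW_CQ. The helper name is hypothetical, the mailbox is assumed to come
+ * back zeroed from mlxsw_cmd_mbox_alloc(), and the field values are only
+ * examples.
+ */
+static inline int mlxsw_cmd_sw2hw_cq_example(struct mlxsw_core *mlxsw_core,
+ u32 cq_number, u8 log_cq_size,
+ dma_addr_t *pages, int num_pages)
+{
+ char *in_mbox;
+ int i, err;
+
+ in_mbox = mlxsw_cmd_mbox_alloc();
+ if (!in_mbox)
+ return -ENOMEM;
+ mlxsw_cmd_mbox_sw2hw_cq_cv_set(in_mbox, 0); /* CQE version 0 */
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(in_mbox, 0); /* report to EQ 0 */
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(in_mbox, 0); /* FIRED */
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(in_mbox, log_cq_size);
+ for (i = 0; i < num_pages; i++)
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(in_mbox, i, pages[i]);
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_core, in_mbox, cq_number);
+ mlxsw_cmd_mbox_free(in_mbox);
+ return err;
+}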
+
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+ u32 cq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+ 0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+ 0, cq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+ 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
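+
+/* An EQ is brought under hardware ownership much like a CQ; the one extra
+ * decision is MSI-X versus legacy interrupt delivery via int_msix. A sketch
+ * with hypothetical variable names:
+ *
+ * mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(in_mbox, 1);
+ * mlxsw_cmd_mbox_sw2hw_eq_st_set(in_mbox, 1); (ARMED)
+ * mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(in_mbox, log_eq_size);
+ * mlxsw_cmd_mbox_sw2hw_eq_pa_set(in_mbox, i, page_addr);
+ * err = mlxsw_cmd_sw2hw_eq(mlxsw_core, in_mbox, eq_number);
+ */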
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+ u32 eq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+ 0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+ 0, eq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 000000000000..09325b72d524
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,1296 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+ u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
+ u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+ u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
+ u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+ struct u64_stats_sync syncp;
+ u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+ u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+ u32 trap_rx_invalid;
+ u32 port_rx_invalid;
+};
+
+struct mlxsw_core {
+ struct mlxsw_driver *driver;
+ const struct mlxsw_bus *bus;
+ void *bus_priv;
+ const struct mlxsw_bus_info *bus_info;
+ struct list_head rx_listener_list;
+ struct list_head event_listener_list;
+ struct {
+ struct sk_buff *resp_skb;
+ u64 tid;
+ wait_queue_head_t wait;
+ bool trans_active;
+ struct mutex lock; /* One EMAD transaction at a time. */
+ bool use_emad;
+ } emad;
+ struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+ struct dentry *dbg_dir;
+ struct {
+ struct debugfs_blob_wrapper vsd_blob;
+ struct debugfs_blob_wrapper psid_blob;
+ } dbg;
+ unsigned long driver_priv[0];
+ /* driver_priv has to be always the last item */
+};
+
+struct mlxsw_rx_listener_item {
+ struct list_head list;
+ struct mlxsw_rx_listener rxl;
+ void *priv;
+};
+
+struct mlxsw_event_listener_item {
+ struct list_head list;
+ struct mlxsw_event_listener el;
+ void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the operation TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
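+
+/* Taken together, the items above describe an EMAD frame with the following
+ * layout (lengths per the constants in emad.h; the bus-specific TX header
+ * is prepended separately by the driver):
+ *
+ * | Ethernet header | op TLV | reg TLV | end TLV |
+ * | 16 bytes | 4 u32 | 1 u32 + register payload | 1 u32 |
+ *
+ * mlxsw_emad_construct() below builds this frame back-to-front with
+ * skb_push().
+ */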
+
+enum mlxsw_core_reg_access_type {
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+ switch (type) {
+ case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+ return "query";
+ case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+ return "write";
+ }
+ BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+ mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+ mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+ const struct mlxsw_reg_info *reg,
+ char *payload)
+{
+ mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+ mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+ memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+ const struct mlxsw_reg_info *reg,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+ mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+ mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+ mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+ if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+ else
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+ mlxsw_emad_op_tlv_class_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+ char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+ mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+ mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+ mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+ mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+ mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+ skb_reset_mac_header(skb);
+
+ return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ char *buf;
+
+ buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_end_tlv(buf);
+
+ buf = skb_push(skb, reg->len + sizeof(u32));
+ mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+ buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+ mlxsw_emad_construct_eth_hdr(skb);
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+ return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE;
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ int err;
+ int ret;
+
+ err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+ if (err) {
+ dev_warn(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ dev_kfree_skb(skb);
+ return err;
+ }
+
+ mlxsw_core->emad.trans_active = true;
+ ret = wait_event_timeout(mlxsw_core->emad.wait,
+ !(mlxsw_core->emad.trans_active),
+ msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+ if (!ret) {
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ mlxsw_core->emad.trans_active = false;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+ char *op_tlv)
+{
+ enum mlxsw_emad_op_tlv_status status;
+ u64 tid;
+
+ status = mlxsw_emad_op_tlv_status_get(op_tlv);
+ tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return 0;
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EAGAIN;
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ default:
+ dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EIO;
+ }
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb)
+{
+ return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct sk_buff *trans_skb;
+ int n_retry;
+ int err;
+
+ n_retry = 0;
+retry:
+ /* We copy the EMAD to a new skb, since we might need
+ * to retransmit it in case of failure.
+ */
+ trans_skb = skb_copy(skb, GFP_KERNEL);
+ if (!trans_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+ if (!err) {
+ struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+ err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+ if (err)
+ dev_kfree_skb(resp_skb);
+ if (err != -EAGAIN)
+ goto out;
+ }
+ if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+
+out:
+ dev_kfree_skb(skb);
+ mlxsw_core->emad.tid++;
+ return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ if (mlxsw_emad_is_resp(skb) &&
+ mlxsw_core->emad.trans_active &&
+ mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+ mlxsw_core->emad.resp_skb = skb;
+ mlxsw_core->emad.trans_active = false;
+ wake_up(&mlxsw_core->emad.wait);
+ } else {
+ dev_kfree_skb(skb);
+ }
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+ .func = mlxsw_emad_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_TRAP_ID_ETHEMAD);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ /* Set the upper 32 bits of the transaction ID field to a random
+ * number. This allows us to discard EMADs addressed to other
+ * devices.
+ */
+ get_random_bytes(&mlxsw_core->emad.tid, sizeof(mlxsw_core->emad.tid));
+ mlxsw_core->emad.tid &= ~(u64) 0xffffffff;
+
+ init_waitqueue_head(&mlxsw_core->emad.wait);
+ mlxsw_core->emad.trans_active = false;
+ mutex_init(&mlxsw_core->emad.lock);
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_emad_traps_set(mlxsw_core);
+ if (err)
+ goto err_emad_trap_set;
+
+ mlxsw_core->emad.use_emad = true;
+
+ return 0;
+
+err_emad_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_TRAP_ID_ETHEMAD);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+ u16 reg_len)
+{
+ struct sk_buff *skb;
+ u16 emad_len;
+
+ emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+ (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+ sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+ return NULL;
+
+ skb = netdev_alloc_skb(NULL, emad_len);
+ if (!skb)
+ return NULL;
+ memset(skb->data, 0, emad_len);
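+ /* Reserve the whole EMAD length as headroom for the skb_push()
+ * calls in mlxsw_emad_construct().
+ */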
+ skb_reserve(skb, emad_len);
+
+ return skb;
+}
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_core *mlxsw_core = file->private;
+ struct mlxsw_core_pcpu_stats *p;
+ u64 rx_packets, rx_bytes;
+ u64 tmp_rx_packets, tmp_rx_bytes;
+ u32 rx_dropped, rx_invalid;
+ unsigned int start;
+ int i;
+ int j;
+ static const char hdr[] =
+ " NUM RX_PACKETS RX_BYTES RX_DROPPED\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->trap_rx_packets[i];
+ tmp_rx_bytes = p->trap_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->trap_rx_dropped[i];
+ }
+ seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->trap_rx_invalid;
+ }
+ seq_printf(file, "trap INV %10u\n",
+ rx_invalid);
+
+ for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->port_rx_packets[i];
+ tmp_rx_bytes = p->port_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->port_rx_dropped[i];
+ }
+ seq_printf(file, "port %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->port_rx_invalid;
+ }
+ seq_printf(file, "port INV %10u\n",
+ rx_invalid);
+ return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+ struct mlxsw_core *mlxsw_core = inode->i_private;
+
+ return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = mlxsw_core_rx_stats_dbg_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
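+ /* Find the last non-zero word so trailing zeroes are not dumped,
+ * but always dump at least one line of four words.
+ */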
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_del(&mlxsw_driver->list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+ if (strcmp(mlxsw_driver->kind, kind) == 0)
+ return mlxsw_driver;
+ }
+ return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ if (!mlxsw_driver) {
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ }
+ if (mlxsw_driver) {
+ if (!try_module_get(mlxsw_driver->owner))
+ mlxsw_driver = NULL;
+ }
+
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ if (!mlxsw_driver)
+ return;
+ module_put(mlxsw_driver->owner);
+}
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+ const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+ mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+ mlxsw_core_dbg_root);
+ if (!mlxsw_core->dbg_dir)
+ return -ENOMEM;
+ debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+ mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+ mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+ mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+ debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.vsd_blob);
+ mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+ mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+ debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.psid_blob);
+ return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+ debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv)
+{
+ const char *device_kind = mlxsw_bus_info->device_kind;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_driver *mlxsw_driver;
+ size_t alloc_size;
+ int err;
+
+ mlxsw_driver = mlxsw_core_driver_get(device_kind);
+ if (!mlxsw_driver)
+ return -EINVAL;
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_core) {
+ err = -ENOMEM;
+ goto err_core_alloc;
+ }
+
+ INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+ INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+ mlxsw_core->driver = mlxsw_driver;
+ mlxsw_core->bus = mlxsw_bus;
+ mlxsw_core->bus_priv = bus_priv;
+ mlxsw_core->bus_info = mlxsw_bus_info;
+
+ mlxsw_core->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+ if (!mlxsw_core->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+ if (err)
+ goto err_bus_init;
+
+ err = mlxsw_emad_init(mlxsw_core);
+ if (err)
+ goto err_emad_init;
+
+ err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+ mlxsw_bus_info);
+ if (err)
+ goto err_driver_init;
+
+ err = mlxsw_core_debugfs_init(mlxsw_core);
+ if (err)
+ goto err_debugfs_init;
+
+ return 0;
+
+err_debugfs_init:
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+ mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
+ free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+ kfree(mlxsw_core);
+err_core_alloc:
+ mlxsw_core_driver_put(device_kind);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+ const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+ mlxsw_core_debugfs_fini(mlxsw_core);
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_emad_fini(mlxsw_core);
+ mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ free_percpu(mlxsw_core->pcpu_stats);
+ kfree(mlxsw_core);
+ mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+ return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+ const struct mlxsw_rx_listener *rxl_b)
+{
+ return (rxl_a->func == rxl_b->func &&
+ rxl_a->local_port == rxl_b->local_port &&
+ rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+ rxl_item->priv == priv)
+ return rxl_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (rxl_item)
+ return -EEXIST;
+ rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+ if (!rxl_item)
+ return -ENOMEM;
+ rxl_item->rxl = *rxl;
+ rxl_item->priv = priv;
+
+ list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (!rxl_item)
+ return;
+ list_del_rcu(&rxl_item->list);
+ synchronize_rcu();
+ kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *event_listener_item = priv;
+ struct mlxsw_reg_info reg;
+ char *payload;
+ char *op_tlv = mlxsw_emad_op_tlv(skb);
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+ reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+ reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+ payload = mlxsw_emad_reg_payload(op_tlv);
+ event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+ dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+ const struct mlxsw_event_listener *el_b)
+{
+ return (el_a->func == el_b->func &&
+ el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+
+ list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+ if (__is_event_listener_equal(&el_item->el, el) &&
+ el_item->priv == priv)
+ return el_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ int err;
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (el_item)
+ return -EEXIST;
+ el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+ if (!el_item)
+ return -ENOMEM;
+ el_item->el = *el;
+ el_item->priv = priv;
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+ if (err)
+ goto err_rx_listener_register;
+
+ /* No reason to save item if we did not manage to register an RX
+ * listener for it.
+ */
+ list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+ return 0;
+
+err_rx_listener_register:
+ kfree(el_item);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (!el_item)
+ return;
+ mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+ list_del(&el_item->list);
+ kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err;
+ char *op_tlv;
+ struct sk_buff *skb;
+ struct mlxsw_tx_info tx_info = {
+ .local_port = MLXSW_PORT_CPU_PORT,
+ .is_emad = true,
+ };
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ if (!skb)
+ return -ENOMEM;
+
+ mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+ mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+ err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+ if (!err) {
+ op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+ memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+ reg->len);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+ mlxsw_core->emad.tid - 1);
+ mlxsw_core_buf_dump_dbg(mlxsw_core,
+ mlxsw_core->emad.resp_skb->data,
+ mlxsw_core->emad.resp_skb->len);
+
+ dev_kfree_skb(mlxsw_core->emad.resp_skb);
+ }
+
+ return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err, n_retry;
+ char *in_mbox, *out_mbox, *tmp;
+
+ in_mbox = mlxsw_cmd_mbox_alloc();
+ if (!in_mbox)
+ return -ENOMEM;
+
+ out_mbox = mlxsw_cmd_mbox_alloc();
+ if (!out_mbox) {
+ err = -ENOMEM;
+ goto free_in_mbox;
+ }
+
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+ tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+ mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+ n_retry = 0;
+retry:
+ err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+ if (!err) {
+ err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ }
+
+ if (!err)
+ memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+ reg->len);
+
+ mlxsw_core->emad.tid++;
+ mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+ mlxsw_cmd_mbox_free(in_mbox);
+ return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ u64 cur_tid;
+ int err;
+
+ if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+ dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+ return -EINTR;
+ }
+
+ cur_tid = mlxsw_core->emad.tid;
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ /* During initialization EMAD interface is not available to us,
+ * so we default to command interface. We switch to EMAD interface
+ * after setting the appropriate traps.
+ */
+ if (!mlxsw_core->emad.use_emad)
+ err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+ payload, type);
+ else
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type);
+
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ mutex_unlock(&mlxsw_core->emad.lock);
+ return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+ const struct mlxsw_rx_listener *rxl;
+ struct mlxsw_core_pcpu_stats *pcpu_stats;
+ u8 local_port = rx_info->sys_port;
+ bool found = false;
+
+ dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+ __func__, rx_info->sys_port, rx_info->trap_id);
+
+ if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+ (local_port >= MLXSW_PORT_MAX_PORTS))
+ goto drop;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ rxl = &rxl_item->rxl;
+ if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+ rxl->local_port == local_port) &&
+ rxl->trap_id == rx_info->trap_id) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found)
+ goto drop;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->port_rx_packets[local_port]++;
+ pcpu_stats->port_rx_bytes[local_port] += skb->len;
+ pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+ pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ rxl->func(skb, local_port, rxl_item->priv);
+ return;
+
+drop:
+ if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+ if (local_port >= MLXSW_PORT_MAX_PORTS)
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size)
+{
+ u8 status;
+ int err;
+
+ BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+ if (!mlxsw_core->bus->cmd_exec)
+ return -EOPNOTSUPP;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+ if (in_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+ }
+
+ err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+ opcode_mod, in_mod, out_mbox_direct,
+ in_mbox, in_mbox_size,
+ out_mbox, out_mbox_size, &status);
+
+ if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod, status, mlxsw_cmd_status_str(status));
+ } else if (err == -ETIMEDOUT) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod);
+ }
+
+ if (!err && out_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+ mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+ if (!mlxsw_core_dbg_root)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+ debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 000000000000..165808471188
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind) \
+ MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+ u8 local_port;
+ bool is_emad;
+};
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+ void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+ u8 local_port;
+ u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+ void (*func)(const struct mlxsw_reg_info *reg,
+ char *payload, void *priv);
+ enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
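+
+/* Usage sketch (all "my_*" names are hypothetical): a listener is matched
+ * on (func, local_port, trap_id), with MLXSW_PORT_DONT_CARE from port.h
+ * matching any port. The callback takes ownership of the skb:
+ *
+ * static void my_rx_func(struct sk_buff *skb, u8 local_port, void *priv)
+ * {
+ * ...
+ * dev_kfree_skb(skb);
+ * }
+ *
+ * static const struct mlxsw_rx_listener my_rxl = {
+ * .func = my_rx_func,
+ * .local_port = MLXSW_PORT_DONT_CARE,
+ * .trap_id = MLXSW_TRAP_ID_ETHEMAD,
+ * };
+ *
+ * err = mlxsw_core_rx_listener_register(mlxsw_core, &my_rxl, priv);
+ *
+ * trap_id may be any ID from trap.h; ETHEMAD is shown only as an example.
+ */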
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+ u16 sys_port;
+ int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+ u8 used_type:1,
+ used_properties:1;
+ u8 type;
+ u8 properties;
+};
+
+struct mlxsw_config_profile {
+ u16 used_max_vepa_channels:1,
+ used_max_lag:1,
+ used_max_port_per_lag:1,
+ used_max_mid:1,
+ used_max_pgt:1,
+ used_max_system_port:1,
+ used_max_vlan_groups:1,
+ used_max_regions:1,
+ used_flood_tables:1,
+ used_flood_mode:1,
+ used_max_ib_mc:1,
+ used_max_pkey:1,
+ used_ar_sec:1,
+ used_adaptive_routing_group_cap:1;
+ u8 max_vepa_channels;
+ u16 max_lag;
+ u16 max_port_per_lag;
+ u16 max_mid;
+ u16 max_pgt;
+ u16 max_system_port;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u8 max_flood_tables;
+ u8 max_vid_flood_tables;
+ u8 flood_mode;
+ u16 max_ib_mc;
+ u16 max_pkey;
+ u8 ar_sec;
+ u16 adaptive_routing_group_cap;
+ u8 arn;
+ struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
+
+struct mlxsw_driver {
+ struct list_head list;
+ const char *kind;
+ struct module *owner;
+ size_t priv_size;
+ int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info);
+ void (*fini)(void *driver_priv);
+ void (*txhdr_construct)(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ u8 txhdr_len;
+ const struct mlxsw_config_profile *profile;
+};
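+
+/* Registration sketch (all "my_*" names are hypothetical): a switch driver
+ * fills in a struct mlxsw_driver and registers it at module init time:
+ *
+ * static struct mlxsw_driver my_driver = {
+ * .kind = MLXSW_DEVICE_KIND_SWITCHX2,
+ * .owner = THIS_MODULE,
+ * .priv_size = sizeof(struct my_priv),
+ * .init = my_init,
+ * .fini = my_fini,
+ * .txhdr_construct = my_txhdr_construct,
+ * .txhdr_len = MY_TXHDR_LEN,
+ * .profile = &my_config_profile,
+ * };
+ *
+ * err = mlxsw_core_driver_register(&my_driver);
+ *
+ * Together with MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2), this
+ * lets the core request_module() the driver by device kind.
+ */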
+
+struct mlxsw_bus {
+ const char *kind;
+ int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile);
+ void (*fini)(void *bus_priv);
+ bool (*skb_transmit_busy)(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info);
+ int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+ const char *device_kind;
+ const char *device_name;
+ struct device *dev;
+ struct {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+ } fw_rev;
+ u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+ u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 000000000000..97b6bb5d9185
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518 /* Length in u8 */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10 /* Length in u8 */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+ MLXSW_EMAD_TLV_TYPE_END,
+ MLXSW_EMAD_TLV_TYPE_OP,
+ MLXSW_EMAD_TLV_TYPE_DR,
+ MLXSW_EMAD_TLV_TYPE_REG,
+ MLXSW_EMAD_TLV_TYPE_USERDATA,
+ MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4 /* Length in u32 */
+
+enum {
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+ MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+ MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+ MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+ MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+ MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+ MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+ MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+ MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return "operation performed";
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ return "device is busy";
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ return "version not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ return "unknown TLV";
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ return "register not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ return "class not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ return "method not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ return "bad parameter";
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ return "resource not available";
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ return "acknowledged. retransmit";
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ return "internal error";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum {
+ MLXSW_EMAD_OP_TLV_REQUEST,
+ MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+ MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+ MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 000000000000..ffd55d030ce2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,405 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+ unsigned short offset; /* bytes in container */
+ unsigned short step; /* step in bytes for indexed items */
+ unsigned short in_step_offset; /* offset within one step */
+ unsigned char shift; /* shift in bits */
+ unsigned char element_size; /* size of element in bit array */
+ bool no_real_shift;
+ union {
+ unsigned char bits;
+ unsigned short bytes;
+ } size;
+ const char *name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+ size_t typesize)
+{
+ BUG_ON(index && !item->step);
+ if (item->offset % typesize != 0 ||
+ item->step % typesize != 0 ||
+ item->in_step_offset % typesize != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+ item->name, item->offset, item->step,
+ item->in_step_offset, typesize);
+ BUG();
+ }
+
+ return ((item->offset + item->step * index + item->in_step_offset) /
+ typesize);
+}
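+
+/* Worked example (illustrative only): for an item with offset 0x10,
+ * step 0x08 and in_step_offset 0x04, index 2 and typesize sizeof(u32)
+ * yield (0x10 + 0x08 * 2 + 0x04) / 4 = 9, i.e. the element lives in
+ * the tenth big-endian u32 of the container buffer.
+ */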
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 tmp;
+
+ tmp = be16_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+ unsigned short index, u16 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u16 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be16_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 tmp;
+
+ tmp = be32_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+ unsigned short index, u32 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u32 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be32_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be32(tmp);
+}
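+
+/* Illustrative sketch of the 32-bit pair above: for a hypothetical item
+ * with shift 8 and size.bits 4, __mlxsw_item_set32(buf, item, 0, 0xA)
+ * masks the value and stores it into bits 11:8 of the first big-endian
+ * u32, leaving all other bits intact; __mlxsw_item_get32() shifts and
+ * masks it back out and returns 0xA. With no_real_shift set, values are
+ * passed and returned in register position (0xA00) instead.
+ */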
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 tmp;
+
+ tmp = be64_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+ unsigned short index, u64 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+ u64 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be64_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+ struct mlxsw_item *item)
+{
+ memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+ struct mlxsw_item *item)
+{
+ memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+ u16 max_index, be_index;
+ u16 offset; /* byte offset inside the array */
+
+ BUG_ON(index && !item->element_size);
+ if (item->offset % sizeof(u32) != 0 ||
+ BITS_PER_BYTE % item->element_size != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+ item->name, item->offset, item->element_size);
+ BUG();
+ }
+
+ max_index = (item->size.bytes << 3) / item->element_size - 1;
+ be_index = max_index - index;
+ offset = be_index * item->element_size >> 3;
+ *shift = index % (BITS_PER_BYTE / item->element_size) * item->element_size;
+
+ return item->offset + offset;
+}
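+
+/* Worked example (illustrative only): a 4-byte bit array with 2-bit
+ * elements holds 16 elements, so max_index is 15. Element index 0 maps
+ * to be_index 15 and thus to the last byte of the array: the device
+ * stores the elements big-endian, while the API exposes them starting
+ * from index 0.
+ */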
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+ u16 index)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+ tmp = buf[offset];
+ tmp >>= shift;
+ tmp &= GENMASK(item->element_size - 1, 0);
+ return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+ u16 index, u8 val)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+ u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+ val <<= shift;
+ val &= mask;
+ tmp = buf[offset];
+ tmp &= ~mask;
+ tmp |= val;
+ buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname) \
+ mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u16 val) \
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
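+
+/* Usage sketch (hypothetical names, not part of this patch): declaring
+ *
+ * MLXSW_ITEM32(reg, xyz, prio, 0x08, 4, 3);
+ *
+ * emits mlxsw_reg_xyz_prio_get()/_set() accessors for a 3-bit field at
+ * bits 6:4 of the big-endian u32 at byte offset 0x08:
+ *
+ * mlxsw_reg_xyz_prio_set(payload, 5);
+ * u32 prio = mlxsw_reg_xyz_prio_get(payload);
+ */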
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
+ _sizebits, _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u64 val) \
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
+ _element_size) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .element_size = _element_size, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \
+{ \
+ return __mlxsw_item_bit_array_get(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
+{ \
+ return __mlxsw_item_bit_array_set(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+} \
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 000000000000..a34f4742aa00
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,1808 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+ {0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+ switch (id->device) {
+ case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+ return MLXSW_DEVICE_KIND_SWITCHX2;
+ default:
+ BUG();
+ }
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+ ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
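+
+/* Example (illustrative): mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0)
+ * expands to iowrite32be(0, mlxsw_pci->hw_addr + MLXSW_PCI_CIR_TOKEN),
+ * so callers name BAR0 registers by their MLXSW_PCI_ suffix only.
+ */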
+
+enum mlxsw_pci_queue_type {
+ MLXSW_PCI_QUEUE_TYPE_SDQ,
+ MLXSW_PCI_QUEUE_TYPE_RDQ,
+ MLXSW_PCI_QUEUE_TYPE_CQ,
+ MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+ switch (q_type) {
+ case MLXSW_PCI_QUEUE_TYPE_SDQ:
+ return "sdq";
+ case MLXSW_PCI_QUEUE_TYPE_RDQ:
+ return "rdq";
+ case MLXSW_PCI_QUEUE_TYPE_CQ:
+ return "cq";
+ case MLXSW_PCI_QUEUE_TYPE_EQ:
+ return "eq";
+ }
+ BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+ MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+ MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+ MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+ 0, /* unused */
+ 0, /* unused */
+ MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+ char *buf;
+ dma_addr_t mapaddr;
+ size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+ char *elem; /* pointer to actual dma mapped element mem chunk */
+ union {
+ struct {
+ struct sk_buff *skb;
+ } sdq;
+ struct {
+ struct sk_buff *skb;
+ } rdq;
+ } u;
+};
+
+struct mlxsw_pci_queue {
+ spinlock_t lock; /* for queue accesses */
+ struct mlxsw_pci_mem_item mem_item;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ u16 producer_counter;
+ u16 consumer_counter;
+ u16 count; /* number of elements in queue */
+ u8 num; /* queue number */
+ u8 elem_size; /* size of one element */
+ enum mlxsw_pci_queue_type type;
+ struct tasklet_struct tasklet; /* queue processing tasklet */
+ struct mlxsw_pci *pci;
+ union {
+ struct {
+ u32 comp_sdq_count;
+ u32 comp_rdq_count;
+ } cq;
+ struct {
+ u32 ev_cmd_count;
+ u32 ev_comp_count;
+ u32 ev_other_count;
+ } eq;
+ } u;
+};
+
+struct mlxsw_pci_queue_type_group {
+ struct mlxsw_pci_queue *q;
+ u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+ u32 doorbell_offset;
+ struct msix_entry msix_entry;
+ struct mlxsw_core *core;
+ struct {
+ u16 num_pages;
+ struct mlxsw_pci_mem_item *items;
+ } fw_area;
+ struct {
+ struct mutex lock; /* Lock access to command registers */
+ bool nopoll;
+ wait_queue_head_t wait;
+ bool wait_done;
+ struct {
+ u8 status;
+ u64 out_param;
+ } comp;
+ } cmd;
+ struct mlxsw_bus_info bus_info;
+ struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+ tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+ size_t elem_size, int elem_index)
+{
+ return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->producer_counter & (q->count - 1);
+
+ if ((q->producer_counter - q->consumer_counter) == q->count)
+ return NULL;
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->consumer_counter & (q->count - 1);
+
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+ return owner_bit != !!(q->consumer_counter & q->count);
+}
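+
+/* Example of the ownership check above (illustrative): counts are
+ * powers of two, so (consumer_counter & count) extracts a parity bit
+ * that flips on every full pass over the ring. An element may be
+ * consumed by software only when its owner bit matches this parity;
+ * otherwise hardware has not completed it yet.
+ */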
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+ u32 (*get_elem_owner_func)(char *))
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = get_elem_owner_func(elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+ return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+ return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_type_offset[q->type],
+ q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_arm_type_offset[q->type],
+ q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+ q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+ int page_index)
+{
+ return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Set CQ to be the same number as this SDQ. */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, char *frag_data, size_t frag_len,
+ int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ dma_addr_t mapaddr;
+
+ mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+ if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ if (net_ratelimit())
+ dev_err(&pdev->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+ return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+ dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+ if (!frag_len)
+ return;
+ pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ size_t buf_len = MLXSW_PORT_MAX_MTU;
+ char *wqe = elem_info->elem;
+ struct sk_buff *skb;
+ int err;
+
+ elem_info->u.rdq.skb = NULL;
+ skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Assume that wqe was previously zeroed. */
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ buf_len, DMA_FROM_DEVICE);
+ if (err)
+ goto err_frag_map;
+
+ elem_info->u.rdq.skb = skb;
+ return 0;
+
+err_frag_map:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ struct sk_buff *skb;
+ char *wqe;
+
+ skb = elem_info->u.rdq.skb;
+ wqe = elem_info->elem;
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Set CQ to be the same number as this RDQ, offset by
+ * MLXSW_PCI_SDQS_COUNT since the lower CQ numbers are
+ * assigned to the SDQs.
+ */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ BUG_ON(!elem_info);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err)
+ goto rollback;
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ }
+
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+ return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_cqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.cq.comp_sdq_count,
+ q->u.cq.comp_rdq_count, q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.sdq.skb;
+ wqe = elem_info->elem;
+ for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ elem_info->u.sdq.skb = NULL;
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+ spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ struct mlxsw_rx_info rx_info;
+ u16 byte_count;
+ int err;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.rdq.skb;
+ if (!skb)
+ return;
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+ /* We do not support LAG yet */
+ if (mlxsw_pci_cqe_lag_get(cqe))
+ goto drop;
+
+ rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+ rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe))
+ byte_count -= ETH_FCS_LEN;
+ skb_put(skb, byte_count);
+ mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+ memset(wqe, 0, q->elem_size);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err && net_ratelimit())
+ dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return;
+
+drop:
+ dev_kfree_skb_any(skb);
+ goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ char *cqe;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+ if (sendq) {
+ struct mlxsw_pci_queue *sdq;
+
+ sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_sdq_count++;
+ } else {
+ struct mlxsw_pci_queue *rdq;
+
+ rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_rdq_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+}
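+
+/* Note on the credit scheme above (illustrative): at most half the
+ * ring (q->count >> 1) is processed per tasklet run, bounding the work
+ * done in one invocation; the consumer doorbell then returns the
+ * processed elements to hardware and the arm doorbell requests the
+ * next completion event.
+ */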
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.eq.ev_cmd_count,
+ q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+ mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+ mlxsw_pci->cmd.comp.out_param =
+ ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+ mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+ mlxsw_pci->cmd.wait_done = true;
+ wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+ char *eqe;
+ u8 cqn;
+ bool cq_handle = false;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ memset(&active_cqns, 0, sizeof(active_cqns));
+
+ while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+ u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+ switch (event_type) {
+ case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+ q->u.eq.ev_cmd_count++;
+ break;
+ case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
+ cq_handle = true;
+ q->u.eq.ev_comp_count++;
+ break;
+ default:
+ q->u.eq.ev_other_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+
+ if (!cq_handle)
+ return;
+ for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+}
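+
+/* Dispatch note (illustrative): completion events only record which
+ * CQs became active in the bitmap; the per-CQ tasklets scheduled at
+ * the end do the actual completion processing, keeping the EQ handler
+ * itself short.
+ */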
+
+struct mlxsw_pci_queue_ops {
+ const char *name;
+ enum mlxsw_pci_queue_type type;
+ int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q);
+ void (*fini)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
+ void (*tasklet)(unsigned long data);
+ int (*dbg_read)(struct seq_file *s, void *data);
+ u16 elem_count;
+ u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_SDQ,
+ .init = mlxsw_pci_sdq_init,
+ .fini = mlxsw_pci_sdq_fini,
+ .dbg_read = mlxsw_pci_sdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_RDQ,
+ .init = mlxsw_pci_rdq_init,
+ .fini = mlxsw_pci_rdq_fini,
+ .dbg_read = mlxsw_pci_rdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_CQ,
+ .init = mlxsw_pci_cq_init,
+ .fini = mlxsw_pci_cq_fini,
+ .tasklet = mlxsw_pci_cq_tasklet,
+ .dbg_read = mlxsw_pci_cq_dbg_read,
+ .elem_count = MLXSW_PCI_CQE_COUNT,
+ .elem_size = MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_EQ,
+ .init = mlxsw_pci_eq_init,
+ .fini = mlxsw_pci_eq_fini,
+ .tasklet = mlxsw_pci_eq_tasklet,
+ .dbg_read = mlxsw_pci_eq_dbg_read,
+ .elem_count = MLXSW_PCI_EQE_COUNT,
+ .elem_size = MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q, u8 q_num)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+ int i;
+ int err;
+
+ spin_lock_init(&q->lock);
+ q->num = q_num;
+ q->count = q_ops->elem_count;
+ q->elem_size = q_ops->elem_size;
+ q->type = q_ops->type;
+ q->pci = mlxsw_pci;
+
+ if (q_ops->tasklet)
+ tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+ mem_item->size = MLXSW_PCI_AQ_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf)
+ return -ENOMEM;
+ memset(mem_item->buf, 0, mem_item->size);
+
+ q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+ if (!q->elem_info) {
+ err = -ENOMEM;
+ goto err_elem_info_alloc;
+ }
+
+ /* Initialize the elem_info array up front so the DMA-mapped
+ * elements are easy to access later.
+ */
+ for (i = 0; i < q->count; i++) {
+ struct mlxsw_pci_queue_elem_info *elem_info;
+
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ elem_info->elem =
+ __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+ }
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = q_ops->init(mlxsw_pci, mbox, q);
+ if (err)
+ goto err_q_ops_init;
+ return 0;
+
+err_q_ops_init:
+ kfree(q->elem_info);
+err_elem_info_alloc:
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+ q_ops->fini(mlxsw_pci, q);
+ kfree(q->elem_info);
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ u8 num_qs)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_type_group *queue_group;
+ char tmp[16];
+ int i;
+ int err;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+ if (!queue_group->q)
+ return -ENOMEM;
+
+ for (i = 0; i < num_qs; i++) {
+ err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+ &queue_group->q[i], i);
+ if (err)
+ goto err_queue_init;
+ }
+ queue_group->count = num_qs;
+
+ sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+ debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+ q_ops->dbg_read);
+
+ return 0;
+
+err_queue_init:
+ for (i--; i >= 0; i--)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+ return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+ int i;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ for (i = 0; i < queue_group->count; i++)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ u8 num_sdqs;
+ u8 sdq_log2sz;
+ u8 num_rdqs;
+ u8 rdq_log2sz;
+ u8 num_cqs;
+ u8 cq_log2sz;
+ u8 num_eqs;
+ u8 eq_log2sz;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+
+ num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+ sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+ num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+ rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+ num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+ cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+ num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+ eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+ if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+ (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+ (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+ (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of queues\n");
+ return -EINVAL;
+ }
+
+ if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+ (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+ num_eqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize event queues\n");
+ return err;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+ num_cqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+ goto err_cqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+ num_sdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+ goto err_sdqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+ num_rdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+ goto err_rdqs_init;
+ }
+
+ /* The command interface had to be polled until now; the event
+ * queues are initialized, so switch to event-driven mode.
+ */
+ mlxsw_pci->cmd.nopoll = true;
+ return 0;
+
+err_rdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+ return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci->cmd.nopoll = false;
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+ char *mbox, int index,
+ const struct mlxsw_swid_config *swid)
+{
+ u8 mask = 0;
+
+ if (swid->used_type) {
+ mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+ mbox, index, swid->type);
+ mask |= 1;
+ }
+ if (swid->used_properties) {
+ mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+ mbox, index, swid->properties);
+ mask |= 2;
+ }
+ mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_config_profile *profile)
+{
+ int i;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ if (profile->used_max_vepa_channels) {
+ mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+ mbox, profile->max_vepa_channels);
+ }
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(
+ mbox, profile->max_lag);
+ }
+ if (profile->used_max_port_per_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+ mbox, profile->max_port_per_lag);
+ }
+ if (profile->used_max_mid) {
+ mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_mid_set(
+ mbox, profile->max_mid);
+ }
+ if (profile->used_max_pgt) {
+ mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pgt_set(
+ mbox, profile->max_pgt);
+ }
+ if (profile->used_max_system_port) {
+ mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_system_port_set(
+ mbox, profile->max_system_port);
+ }
+ if (profile->used_max_vlan_groups) {
+ mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+ mbox, profile->max_vlan_groups);
+ }
+ if (profile->used_max_regions) {
+ mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_regions_set(
+ mbox, profile->max_regions);
+ }
+ if (profile->used_flood_tables) {
+ mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+ mbox, profile->max_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+ mbox, profile->max_vid_flood_tables);
+ }
+ if (profile->used_flood_mode) {
+ mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_flood_mode_set(
+ mbox, profile->flood_mode);
+ }
+ if (profile->used_max_ib_mc) {
+ mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+ mbox, profile->max_ib_mc);
+ }
+ if (profile->used_max_pkey) {
+ mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pkey_set(
+ mbox, profile->max_pkey);
+ }
+ if (profile->used_ar_sec) {
+ mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ar_sec_set(
+ mbox, profile->ar_sec);
+ }
+ if (profile->used_adaptive_routing_group_cap) {
+ mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+ mbox, profile->adaptive_routing_group_cap);
+ }
+
+ for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+ mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+ &profile->swid_config[i]);
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+ mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+ mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+ return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ u16 num_pages)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+ int err;
+
+ mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+ GFP_KERNEL);
+ if (!mlxsw_pci->fw_area.items)
+ return -ENOMEM;
+ mlxsw_pci->fw_area.num_pages = num_pages;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ for (i = 0; i < num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ mem_item->size = MLXSW_PCI_PAGE_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+ mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+ }
+
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+ if (err)
+ goto err_cmd_map_fa;
+
+ return 0;
+
+err_cmd_map_fa:
+err_alloc:
+ for (i--; i >= 0; i--) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+ return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+
+ mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+ for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_id;
+ struct mlxsw_pci_queue *q;
+ int i;
+
+ for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char *mbox;
+ u16 num_pages;
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ mlxsw_pci->core = mlxsw_core;
+
+ mbox = mlxsw_cmd_mbox_alloc();
+ if (!mbox)
+ return -ENOMEM;
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto err_query_fw;
+
+ mlxsw_pci->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+ dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+ err = -EINVAL;
+ goto err_iface_rev;
+ }
+ if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+ err = -EINVAL;
+ goto err_doorbell_page_bar;
+ }
+
+ mlxsw_pci->doorbell_offset =
+ mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+ num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+ err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+ if (err)
+ goto err_fw_area_init;
+
+ err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+ if (err)
+ goto err_boardinfo;
+
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ if (err)
+ goto err_config_profile;
+
+ err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+ if (err)
+ goto err_aqs_init;
+
+ err = request_irq(mlxsw_pci->msix_entry.vector,
+ mlxsw_pci_eq_irq_handler, 0,
+ mlxsw_pci_driver_name, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ request failed\n");
+ goto err_request_eq_irq;
+ }
+
+ goto mbox_put;
+
+err_request_eq_irq:
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+mbox_put:
+ mlxsw_cmd_mbox_free(mbox);
+ return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_tx_info *tx_info)
+{
+ u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+ return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
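+
+/* Example (illustrative): with 3 SDQs, local ports 1, 2 and 3 would
+ * map to SDQs 1, 2 and 0 respectively, spreading transmit traffic
+ * across the available send descriptor queues.
+ */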
+
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+ return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ int i;
+ int err;
+
+ if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ }
+
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ spin_lock_bh(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ if (!elem_info) {
+ /* queue is full */
+ err = -EAGAIN;
+ goto unlock;
+ }
+ elem_info->u.sdq.skb = skb;
+
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+ mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+ mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (err)
+ goto unmap_frags;
+ }
+
+ /* Set unused sq entries byte count to zero. */
+ for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+ /* Everything is set up, ring producer doorbell to get HW going */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ goto unlock;
+
+unmap_frags:
+ for (; i >= 0; i--)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+ spin_unlock_bh(&q->lock);
+ return err;
+}
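+
+/* Transmit sketch (illustrative): scatter/gather entry 0 carries the
+ * linear part of the skb and entries 1..n the page fragments; skbs
+ * with more than MLXSW_PCI_WQE_SG_ENTRIES - 1 fragments are linearized
+ * first so one WQE can always describe the whole packet.
+ */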
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ dma_addr_t in_mapaddr = 0;
+ dma_addr_t out_mapaddr = 0;
+ bool evreq = mlxsw_pci->cmd.nopoll;
+ unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+ bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ int err;
+
+ *p_status = MLXSW_CMD_STATUS_OK;
+
+ err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+ if (err)
+ return err;
+
+ if (in_mbox) {
+ in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
+ in_mbox_size, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+ in_mapaddr))) {
+ err = -EIO;
+ goto err_in_mbox_map;
+ }
+ }
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+ if (out_mbox) {
+ out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
+ out_mbox_size, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+ out_mapaddr))) {
+ err = -EIO;
+ goto err_out_mbox_map;
+ }
+ }
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+ mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+ *p_wait_done = false;
+
+ wmb(); /* all needs to be written before we write control register */
+ mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+ MLXSW_PCI_CIR_CTRL_GO_BIT |
+ (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+ (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+ opcode);
+
+ if (!evreq) {
+ unsigned long end;
+
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ *p_wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
+ } else {
+ wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+ *p_status = mlxsw_pci->cmd.comp.status;
+ }
+
+ err = 0;
+ if (*p_wait_done) {
+ if (*p_status)
+ err = -EIO;
+ } else {
+ err = -ETIMEDOUT;
+ }
+
+ if (!err && out_mbox && out_mbox_direct) {
+ /* Some commands do not use the output parameter as a mailbox
+ * address; they return output directly in the CIR_OUT_PARAM
+ * registers. In that case, copy the registers into the mbox
+ * buffer.
+ */
+ __be32 tmp;
+
+ if (!evreq) {
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+ }
+ }
+
+ if (out_mapaddr)
+ pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
+ PCI_DMA_FROMDEVICE);
+
+ /* fall through */
+
+err_out_mbox_map:
+ if (in_mapaddr)
+ pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
+ PCI_DMA_TODEVICE);
+err_in_mbox_map:
+ mutex_unlock(&mlxsw_pci->cmd.lock);
+
+ return err;
+}
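+
+/* Summary of the two completion paths above (illustrative): before the
+ * event queues exist (cmd.nopoll is false) the GO bit in CIR_CTRL is
+ * polled until the device clears it; afterwards the command completion
+ * EQE wakes the waiter via mlxsw_pci_eq_cmd_event(). Either way the
+ * status byte ends up in *p_status for the caller to map to an errno.
+ */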
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+	/* Current firmware does not let us know when the reset is done.
+	 * So we just wait here for a constant time and hope for the best.
+ */
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlxsw_pci *mlxsw_pci;
+ int err;
+
+ mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+ if (!mlxsw_pci)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!mlxsw_pci->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ mlxsw_pci->pdev = pdev;
+ pci_set_drvdata(pdev, mlxsw_pci);
+
+ err = mlxsw_pci_sw_reset(mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "Software reset failed\n");
+ goto err_sw_reset;
+ }
+
+ err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_msix_init;
+ }
+
+ mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+ mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+ mlxsw_pci->bus_info.dev = &pdev->dev;
+
+ mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+ mlxsw_pci_dbg_root);
+	if (!mlxsw_pci->dbg_dir) {
+		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+		err = -ENOMEM;
+		goto err_dbg_create_dir;
+	}
+
+ err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+ &mlxsw_pci_bus, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register bus device\n");
+ goto err_bus_device_register;
+ }
+
+ return 0;
+
+err_bus_device_register:
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+ pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+ iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(mlxsw_pci);
+ return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+ pci_disable_msix(mlxsw_pci->pdev);
+ iounmap(mlxsw_pci->hw_addr);
+ pci_release_regions(mlxsw_pci->pdev);
+ pci_disable_device(mlxsw_pci->pdev);
+ kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+ .name = mlxsw_pci_driver_name,
+ .id_table = mlxsw_pci_id_table,
+ .probe = mlxsw_pci_probe,
+ .remove = mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+ int err;
+
+ mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+ if (!mlxsw_pci_dbg_root)
+ return -ENOMEM;
+ err = pci_register_driver(&mlxsw_pci_driver);
+ if (err)
+ goto err_register_driver;
+ return 0;
+
+err_register_driver:
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+ return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+ pci_unregister_driver(&mlxsw_pci_driver);
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 000000000000..1ef9664b4512
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,227 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
+#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE 4096
+
+#define MLXSW_PCI_CIR_BASE 0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
+
+#define MLXSW_PCI_SW_RESET 0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
+ ((offset) + (type_offset) + (num) * 4)
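+
+/* For illustration (a worked example of the macro above, not a new
+ * definition): the doorbell for, e.g., SDQ number 3 lives at
+ *	MLXSW_PCI_DOORBELL(off, MLXSW_PCI_DOORBELL_SDQ_OFFSET, 3)
+ *		== off + 0x000 + 3 * 4
+ * i.e. each queue type has its own region and each queue a 32-bit
+ * doorbell slot within it.
+ */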
+
+#define MLXSW_PCI_RDQS_COUNT 24
+#define MLXSW_PCI_SDQS_COUNT 24
+#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT 2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM 1
+
+#define MLXSW_PCI_AQ_PAGES 8
+#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
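+
+/* For reference, with the values above: MLXSW_PCI_AQ_SIZE is
+ * 4096 * 8 = 32768 bytes, which gives 32768 / 32 = 1024 WQEs and
+ * 32768 / 16 = 2048 CQEs/EQEs per queue.
+ */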
+
+#define MLXSW_PCI_WQE_SG_ENTRIES 3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA
+
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
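+
+/* For illustration (a minimal usage sketch of the two items above, as
+ * done in the transmit path): filling scatter/gather entry i with a
+ * DMA-mapped fragment looks like
+ *
+ *	mlxsw_pci_wqe_address_set(wqe, i, mapaddr);
+ *	mlxsw_pci_wqe_byte_count_set(wqe, i, frag_len);
+ *
+ * with unused entries getting a byte count of 0.
+ */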
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
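+
+/* For illustration (a minimal sketch based on the layout above): when
+ * the lag bit is set, a receive handler could decode the field as
+ *
+ *	u16 sp = mlxsw_pci_cqe_system_port_get(cqe);
+ *	u16 lag_id = sp >> 4;
+ *	u8 sub_port = sp & 0xf;
+ */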
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets including two additional
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates that the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
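+
+/* For illustration (a minimal sketch, not a handler from this patch):
+ * an EQ handler would dispatch on this field, e.g.
+ *
+ *	switch (mlxsw_pci_eqe_event_type_get(eqe)) {
+ *	case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ *		(handle command interface completion)
+ *		break;
+ *	case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ *		(handle completion queue event)
+ *		break;
+ *	}
+ */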
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 000000000000..726f5435b32f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU 10000
+
+#define MLXSW_PORT_DEFAULT_VID 1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT 255
+#define MLXSW_PORT_SWID_ALL_SWIDS 254
+#define MLXSW_PORT_SWID_TYPE_ETH 2
+
+#define MLXSW_PORT_MID 0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS 0x40
+#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET 10
+#define MLXSW_PORT_PHY_BITS_OFFSET 4
+#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT 0x0
+
+#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+ MLXSW_PORT_ADMIN_STATUS_UP = 1,
+ MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+ MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+ MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+ MLXSW_PORT_OPER_STATUS_UP = 1,
+ MLXSW_PORT_OPER_STATUS_DOWN = 2,
+ MLXSW_PORT_OPER_STATUS_FAILURE = 4, /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 000000000000..096e1c12175a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,1349 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+ u16 id;
+ u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
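+
+/* For illustration (a minimal usage sketch; assumes the mlxsw_reg_write()
+ * helper declared in core.h): a register is accessed by declaring a
+ * payload buffer of the register's length, packing it and handing it to
+ * the core, e.g. for SGCR (defined right below):
+ *
+ *	char payload[MLXSW_REG_SGCR_LEN];
+ *
+ *	mlxsw_reg_sgcr_pack(payload, true);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), payload);
+ */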
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+ .id = MLXSW_REG_SGCR_ID,
+ .len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+ MLXSW_REG_ZERO(sgcr, payload);
+ mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+ .id = MLXSW_REG_SPAD_ID,
+ .len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In a multi-chip configuration, each device should maintain a mapping
+ * from Multicast ID (MID) to a list of local ports. This mapping is used
+ * in all the devices other than the ingress device, and is implemented
+ * as part of the FDB. The MID record maps from a MID, which is a unique
+ * identifier of the multicast group within the stacking domain, to a
+ * list of local ports into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+ .id = MLXSW_REG_SMID_ID,
+ .len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+ MLXSW_REG_ZERO(smid, payload);
+ mlxsw_reg_smid_swid_set(payload, 0);
+ mlxsw_reg_smid_mid_set(payload, mid);
+ mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sspr = {
+ .id = MLXSW_REG_SSPR_ID,
+ .len = MLXSW_REG_SSPR_LEN,
+};
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). The master system port number will be reported as
+ * the source system port when packets are forwarded to the CPU. Only one
+ * master port is allowed per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+
+/* reg_sspr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(sspr, payload);
+ mlxsw_reg_sspr_m_set(payload, 1);
+ mlxsw_reg_sspr_local_port_set(payload, local_port);
+ mlxsw_reg_sspr_sub_port_set(payload, 0);
+ mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+ .id = MLXSW_REG_SPMS_ID,
+ .len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+ MLXSW_REG_SPMS_STATE_NO_CHANGE,
+ MLXSW_REG_SPMS_STATE_DISCARDING,
+ MLXSW_REG_SPMS_STATE_LEARNING,
+ MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+ enum mlxsw_reg_spms_state state)
+{
+ MLXSW_REG_ZERO(spms, payload);
+ mlxsw_reg_spms_local_port_set(payload, local_port);
+ mlxsw_reg_spms_state_set(payload, vid, state);
+}
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+ .id = MLXSW_REG_SFGC_ID,
+ .len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+ MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+ MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+ MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+ MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+ MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int flood_table)
+{
+ MLXSW_REG_ZERO(sfgc, payload);
+ mlxsw_reg_sfgc_type_set(payload, type);
+ mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfgc_table_type_set(payload, table_type);
+ mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+ mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+ .id = MLXSW_REG_SFTR_ID,
+ .len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_cpu_port_mask
+ * CPU port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+ unsigned int flood_table,
+ unsigned int index,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int range)
+{
+ MLXSW_REG_ZERO(sftr, payload);
+ mlxsw_reg_sftr_swid_set(payload, 0);
+ mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+ mlxsw_reg_sftr_index_set(payload, index);
+ mlxsw_reg_sftr_table_type_set(payload, table_type);
+ mlxsw_reg_sftr_range_set(payload, range);
+ mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+ .id = MLXSW_REG_SPMLR_ID,
+ .len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+ MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+ MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ MLXSW_REG_ZERO(spmlr, payload);
+ mlxsw_reg_spmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spmlr_sub_port_set(payload, 0);
+ mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+ .id = MLXSW_REG_PMLP_ID,
+ .len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pmlp, payload);
+ mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+ .id = MLXSW_REG_PMTU_ID,
+ .len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum between the max_mtu of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller or equal to max_mtu.
+ * Note: If port type is Infiniband, then port must be disabled, when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+ u16 new_mtu)
+{
+ MLXSW_REG_ZERO(pmtu, payload);
+ mlxsw_reg_pmtu_local_port_set(payload, local_port);
+ mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+ mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+ mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+ .id = MLXSW_REG_PTYS_ID,
+ .len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+ u32 proto_admin)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+ mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_adm,
+ u32 *p_eth_proto_oper)
+{
+ if (p_eth_proto_cap)
+ *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+ if (p_eth_proto_adm)
+ *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+ if (p_eth_proto_oper)
+ *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
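+
+/* For illustration (a minimal sketch; assumes the mlxsw_reg_query()
+ * helper declared in core.h): querying the supported speeds of a port
+ * could look like
+ *
+ *	char payload[MLXSW_REG_PTYS_LEN];
+ *	u32 eth_proto_cap;
+ *
+ *	mlxsw_reg_ptys_pack(payload, local_port, 0);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), payload);
+ *	if (!err)
+ *		mlxsw_reg_ptys_unpack(payload, &eth_proto_cap, NULL, NULL);
+ *
+ * after which individual MLXSW_REG_PTYS_ETH_SPEED_* bits can be tested
+ * in eth_proto_cap.
+ */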
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+ .id = MLXSW_REG_PPAD_ID,
+ .len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local port should be 0 and mac[7:0] is reserved;
+ *     HW will assign the per port MAC addresses incrementally.
+ * 1 - single_mac: the MAC address of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * port number, if single_base_mac = 0 then local_port is reserved
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+ u8 local_port)
+{
+ MLXSW_REG_ZERO(ppad, payload);
+ mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+ mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+ .id = MLXSW_REG_PAOS_ID,
+ .len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid
+ * is redundant), router ports use the same local port number, where swid
+ * is the only indication of the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+ enum mlxsw_port_admin_status status)
+{
+ MLXSW_REG_ZERO(paos, payload);
+ mlxsw_reg_paos_swid_set(payload, 0);
+ mlxsw_reg_paos_local_port_set(payload, local_port);
+ mlxsw_reg_paos_admin_status_set(payload, status);
+ mlxsw_reg_paos_oper_status_set(payload, 0);
+ mlxsw_reg_paos_ase_set(payload, 1);
+ mlxsw_reg_paos_ee_set(payload, 1);
+ mlxsw_reg_paos_e_set(payload, 1);
+}
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+ .id = MLXSW_REG_PPCNT_ID,
+ .len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must be always 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counting,
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class
+ * counting, valid values: 0 to cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+ 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+ 0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+ 0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+ 0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+ 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+ 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+ 0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+ 0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+ 0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+ 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+ 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+ 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+ 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+ 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+ 0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+ 0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+ 0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+ 0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+ 0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(ppcnt, payload);
+ mlxsw_reg_ppcnt_swid_set(payload, 0);
+ mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+ mlxsw_reg_ppcnt_pnat_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, 0);
+ mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
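+
+/* For illustration (a minimal sketch; assumes the mlxsw_reg_query()
+ * helper declared in core.h): with the pack helper above, reading the
+ * IEEE 802.3 counters of a port boils down to
+ *
+ *	char payload[MLXSW_REG_PPCNT_LEN];
+ *
+ *	mlxsw_reg_ppcnt_pack(payload, local_port);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), payload);
+ *	if (!err)
+ *		rx_ok = mlxsw_reg_ppcnt_a_frames_received_ok_get(payload);
+ */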
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+ .id = MLXSW_REG_PSPA_ID,
+ .len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+ MLXSW_REG_ZERO(pspa, payload);
+ mlxsw_reg_pspa_swid_set(payload, swid);
+ mlxsw_reg_pspa_local_port_set(payload, local_port);
+ mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+ .id = MLXSW_REG_HTGT_ID,
+ .len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+ MLXSW_REG_HTGT_POLICER_DISABLE,
+ MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+ u8 swid, rdq;
+
+ MLXSW_REG_ZERO(htgt, payload);
+	if (trap_group == MLXSW_REG_HTGT_TRAP_GROUP_EMAD) {
+ swid = MLXSW_PORT_SWID_ALL_SWIDS;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+ } else {
+ swid = 0;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+ }
+ mlxsw_reg_htgt_swid_set(payload, swid);
+ mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+ mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+ mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+ mlxsw_reg_htgt_pid_set(payload, 0);
+ mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+ mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+ mlxsw_reg_htgt_priority_set(payload, 0);
+ mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+ mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+ .id = MLXSW_REG_HPKT_ID,
+ .len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+ MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+ MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+ MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+ MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+ MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+ u8 trap_group, u16 trap_id)
+{
+ MLXSW_REG_ZERO(hpkt, payload);
+ mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+ mlxsw_reg_hpkt_action_set(payload, action);
+ mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+ mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+ mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+ switch (reg_id) {
+ case MLXSW_REG_SGCR_ID:
+ return "SGCR";
+ case MLXSW_REG_SPAD_ID:
+ return "SPAD";
+ case MLXSW_REG_SMID_ID:
+ return "SMID";
+ case MLXSW_REG_SSPR_ID:
+ return "SSPR";
+ case MLXSW_REG_SPMS_ID:
+ return "SPMS";
+ case MLXSW_REG_SFGC_ID:
+ return "SFGC";
+ case MLXSW_REG_SFTR_ID:
+ return "SFTR";
+ case MLXSW_REG_SPMLR_ID:
+ return "SPMLR";
+ case MLXSW_REG_PMLP_ID:
+ return "PMLP";
+ case MLXSW_REG_PMTU_ID:
+ return "PMTU";
+ case MLXSW_REG_PTYS_ID:
+ return "PTYS";
+ case MLXSW_REG_PPAD_ID:
+ return "PPAD";
+ case MLXSW_REG_PAOS_ID:
+ return "PAOS";
+ case MLXSW_REG_PPCNT_ID:
+ return "PPCNT";
+ case MLXSW_REG_PSPA_ID:
+ return "PSPA";
+ case MLXSW_REG_HTGT_ID:
+ return "HTGT";
+ case MLXSW_REG_HPKT_ID:
+ return "HPKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 000000000000..3e52ee93438c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1568 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+ struct mlxsw_sx_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct mlxsw_sx_port {
+ struct net_device *dev;
+ struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sx *mlxsw_sx;
+ u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress port of the egress device.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ bool is_emad = tx_info->is_emad;
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ /* We currently set default values for the egress tclass (QoS). */
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+ MLXSW_TXHDR_ETCLASS_5);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+ mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+ MLXSW_TXHDR_RDQ_OTHER);
+ mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+ mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+ mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+ mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+ MLXSW_TXHDR_NOT_EMAD);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
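The MLXSW_ITEM32() lines above generate the mlxsw_tx_hdr_*_set() accessors used in this function; each macro names a byte offset, the LSB position and the width of a field inside a big-endian 32-bit word. A minimal sketch of the packing such an accessor performs, written as standalone userspace C for illustration (the helper name item32_set is made up; the real code is generated by the mlxsw item macros):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>	/* htonl()/ntohl() */

	/* Pack 'val' into the 'width'-bit field whose LSB is at bit
	 * 'shift' of the big-endian 32-bit word at byte 'off' of 'buf'.
	 */
	static void item32_set(char *buf, unsigned int off, unsigned int shift,
			       unsigned int width, uint32_t val)
	{
		uint32_t mask = width < 32 ? (1u << width) - 1 : 0xffffffffu;
		uint32_t word;

		memcpy(&word, buf + off, sizeof(word));
		word = ntohl(word);
		word &= ~(mask << shift);
		word |= (val & mask) << shift;
		word = htonl(word);
		memcpy(buf + off, &word, sizeof(word));
	}

	/* e.g. the equivalent of mlxsw_tx_hdr_version_set(txhdr, 0):
	 *	item32_set(txhdr, 0x00, 28, 4, 0);
	 */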
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+ u8 oper_status;
+ int err;
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+ if (err)
+ return err;
+ oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+ return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
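Worked example: the default Ethernet MTU of 1500 (ETH_DATA_LEN) is programmed into PMTU as 1500 + 16 (MLXSW_TXHDR_LEN) + 14 (ETH_HLEN) = 1530, since the device's notion of MTU covers the Tx header and the Ethernet header on top of the L3 payload; the request is rejected with -EINVAL if that sum exceeds the port's reported max_mtu.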
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int
+mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_usable)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
+ return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err)
+ return err;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sx_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+ struct sk_buff *skb_orig = skb;
+
+ skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
+ }
+ mlxsw_sx_txhdr_construct(skb, &tx_info);
+ len = skb->len;
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+ if (err)
+ return err;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
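The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair gives a consistent snapshot of the four 64-bit counters per CPU: on 64-bit kernels the seqcount compiles away entirely, while on 32-bit kernels the read is retried if a writer updated the counters mid-fetch, preventing torn 64-bit loads. tx_dropped sits outside the loop on purpose; as the comment notes, a u32 can be read atomically on its own.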
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+ .ndo_open = mlxsw_sx_port_open,
+ .ndo_stop = mlxsw_sx_port_stop,
+ .ndo_start_xmit = mlxsw_sx_port_xmit,
+ .ndo_change_mtu = mlxsw_sx_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sx->bus_info->fw_rev.major,
+ mlxsw_sx->bus_info->fw_rev.minor,
+ mlxsw_sx->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+ data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SX_PORT_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
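These three ethtool callbacks operate as a unit: "ethtool -S" first calls .get_sset_count to size its buffers, then .get_strings for the names and .get_ethtool_stats for the values, so all three must agree on MLXSW_SX_PORT_HW_STATS_LEN and on the ordering of mlxsw_sx_port_hw_stats[].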
+
+struct mlxsw_sx_port_link_mode {
+ u32 mask;
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .speed = 25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .speed = 50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .speed = 100000,
+ },
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return SUPPORTED_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ return SUPPORTED_Backplane;
+ return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].supported;
+ }
+ return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].advertised;
+ }
+ return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_cmd *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+ speed = mlxsw_sx_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return PORT_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+ return PORT_DA;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+ return PORT_NONE;
+
+ return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_oper;
+ int err;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+
+ cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+ mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+ mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+ cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+ cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sx_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 speed;
+ u32 eth_proto_new;
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ bool is_up;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+ mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+ mlxsw_sx_to_ptys_speed(speed);
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+ netdev_err(dev, "Not supported proto admin requested");
+ return -EINVAL;
+ }
+ if (eth_proto_new == eth_proto_admin)
+ return 0;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to set proto admin");
+ return err;
+ }
+
+ err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+ if (err) {
+ netdev_err(dev, "Failed to get oper status");
+ return err;
+ }
+ if (!is_up)
+ return 0;
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ return 0;
+}
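From userspace this pair backs the classic ethtool get/set-settings ioctls; for example (hypothetical interface name):

	# ethtool sw0p1				queries PTYS via get_settings
	# ethtool -s sw0p1 speed 40000 autoneg off

With autoneg off, the requested speed is expanded by mlxsw_sx_to_ptys_speed() into every PTYS proto bit carrying that speed and masked against the port's capability; if anything changed and the port is operationally up, it is bounced (admin down, then up) so the new proto admin takes effect.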
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_sx_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlxsw_sx_port_get_strings,
+ .get_ethtool_stats = mlxsw_sx_port_get_stats,
+ .get_sset_count = mlxsw_sx_port_get_sset_count,
+ .get_settings = mlxsw_sx_port_get_settings,
+ .set_settings = mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+ memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
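Returning the same parent ID from every port is what lets the kernel group these netdevs under one physical switch. Userspace can observe it via the phys_switch_id sysfs attribute or as the switchid field of a detailed iproute2 listing, e.g. "ip -d link show" (exact presentation varies with the iproute2 version).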
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+ return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct net_device *dev = mlxsw_sx_port->dev;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+ int err;
+
+ mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+ /* The last byte of the base MAC address is guaranteed to be
+ * small enough that adding the local_port value to it cannot
+ * overflow.
+ */
+ dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+ return 0;
+}
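Worked example (hypothetical base MAC): if PPAD reports e4:1d:2d:a5:f3:00, port 1 ends up with e4:1d:2d:a5:f3:01, port 2 with ...:02, and so on; the guarantee described in the comment is what makes the plain byte addition safe.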
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 vid, enum mlxsw_reg_spms_state state)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u32 speed)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+ mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ struct net_device *dev;
+ bool usable;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+ if (!dev)
+ return -ENOMEM;
+ mlxsw_sx_port = netdev_priv(dev);
+ mlxsw_sx_port->dev = dev;
+ mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+ mlxsw_sx_port->local_port = local_port;
+
+ mlxsw_sx_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+ if (!mlxsw_sx_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+ dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+ err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+ mlxsw_sx_port->local_port);
+ goto err_dev_addr_get;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_VLAN_CHALLENGED;
+
+ /* Each packet needs to have a Tx header (metadata) on top of all other
+ * headers.
+ */
+ dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_module_check;
+ }
+
+ if (!usable) {
+ dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+ mlxsw_sx_port->local_port);
+ goto port_not_usable;
+ }
+
+ err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_speed_set;
+ }
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+ MLXSW_PORT_DEFAULT_VID,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_stp_state_set;
+ }
+
+ err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mac_learning_mode_set;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sx_port->local_port);
+ goto err_register_netdev;
+ }
+
+ mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+ return 0;
+
+err_register_netdev:
+err_port_admin_status_set:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+ return err;
+}
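The error labels are stacked in reverse order of the init steps, the usual kernel unwind idiom: a failure at step N jumps to a label that skips the teardown of everything not yet set up. Most steps here only program hardware state that needs no explicit rollback, which is why nearly all labels fall through to the two real cleanups, free_percpu() and free_netdev().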
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+ if (!mlxsw_sx_port)
+ return;
+ unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+ free_netdev(mlxsw_sx_port->dev);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+ mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sx->ports)
+ return -ENOMEM;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ err = mlxsw_sx_port_create(mlxsw_sx, i);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+ return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ if (!mlxsw_sx_port) {
+ dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sx_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sx_port->dev);
+ } else {
+ netdev_info(mlxsw_sx_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sx_port->dev);
+ }
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+ .func = mlxsw_sx_pude_event_func,
+ .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sx_pude_event;
+ break;
+ }
+ err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_event_trap_set;
+
+ return 0;
+
+err_event_trap_set:
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sx_pude_event;
+ break;
+ }
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sx_port)) {
+ if (net_ratelimit())
+ dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ skb->dev = mlxsw_sx_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_FDB_MC,
+ },
+ /* Traps for specific L2 packet types, not trapped as FDB MC */
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_STP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LACP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_EAPOL,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LLDP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MMRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MVRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RPVST,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_DHCP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+ },
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ if (err)
+ goto err_rx_listener_register;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_rx_trap_set;
+ }
+ return 0;
+
+err_rx_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+ return err;
+}
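Note that the unwind path does more than unregister the listeners: for every trap already switched to TRAP_TO_CPU it re-packs HPKT with MLXSW_REG_HPKT_ACTION_FORWARD, restoring the default forwarding behavior. This mirrors mlxsw_sx_traps_fini() below exactly, so a partially failed init leaves the device as if init had never run.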
+
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ char *smid_pl;
+ char *sftr_pl;
+ int err;
+
+ /* Due to a FW bug, we must configure SMID. */
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+ mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ if (err)
+ return err;
+
+ /* Configure a flooding table, which includes only the CPU port. */
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+ mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+ kfree(sftr_pl);
+ if (err)
+ return err;
+
+ /* Flood different packet types using the flooding table. */
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ int err;
+
+ mlxsw_sx->core = mlxsw_core;
+ mlxsw_sx->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sx_hw_id_get(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+ return err;
+ }
+
+ err = mlxsw_sx_ports_create(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+ return err;
+ }
+
+ err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+ goto err_event_register;
+ }
+
+ err = mlxsw_sx_traps_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+ goto err_rx_listener_register;
+ }
+
+ err = mlxsw_sx_flood_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+ goto err_flood_init;
+ }
+
+ return 0;
+
+err_flood_init:
+ mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+ mlxsw_sx_ports_remove(mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+
+ mlxsw_sx_traps_fini(mlxsw_sx);
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+ .used_max_vepa_channels = 1,
+ .max_vepa_channels = 0,
+ .used_max_lag = 1,
+ .max_lag = 64,
+ .used_max_port_per_lag = 1,
+ .max_port_per_lag = 16,
+ .used_max_mid = 1,
+ .max_mid = 7000,
+ .used_max_pgt = 1,
+ .max_pgt = 0,
+ .used_max_system_port = 1,
+ .max_system_port = 48000,
+ .used_max_vlan_groups = 1,
+ .max_vlan_groups = 127,
+ .used_max_regions = 1,
+ .max_regions = 400,
+ .used_flood_tables = 1,
+ .max_flood_tables = 2,
+ .max_vid_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
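A sketch of the convention (inferred from how the core packs the profile; treat the details as an assumption): each used_* flag marks the companion max_* value as valid when the profile is passed to the device at init time, and fields whose used_* flag stays clear keep their firmware defaults. Setting, say, used_max_vepa_channels = 1 with max_vepa_channels = 0 therefore explicitly disables the feature rather than leaving it unconfigured.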
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+ .kind = MLXSW_DEVICE_KIND_SWITCHX2,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sx),
+ .init = mlxsw_sx_init,
+ .fini = mlxsw_sx_fini,
+ .txhdr_construct = mlxsw_sx_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+ return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+ mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 000000000000..53a9550be75e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+ /* Ethernet EMAD and FDB miss */
+ MLXSW_TRAP_ID_FDB_MC = 0x01,
+ MLXSW_TRAP_ID_ETHEMAD = 0x05,
+ /* L2 traps for specific packet types */
+ MLXSW_TRAP_ID_STP = 0x10,
+ MLXSW_TRAP_ID_LACP = 0x11,
+ MLXSW_TRAP_ID_EAPOL = 0x12,
+ MLXSW_TRAP_ID_LLDP = 0x13,
+ MLXSW_TRAP_ID_MMRP = 0x14,
+ MLXSW_TRAP_ID_MVRP = 0x15,
+ MLXSW_TRAP_ID_RPVST = 0x16,
+ MLXSW_TRAP_ID_DHCP = 0x19,
+ MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+ MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+ MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+ MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+ MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+ MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+ /* Port Up/Down event generated by hardware */
+ MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 000000000000..06fc46c78a0b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+ MLXSW_TXHDR_ETH_CTL,
+ MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+ MLXSW_TXHDR_ETCLASS_0,
+ MLXSW_TXHDR_ETCLASS_1,
+ MLXSW_TXHDR_ETCLASS_2,
+ MLXSW_TXHDR_ETCLASS_3,
+ MLXSW_TXHDR_ETCLASS_4,
+ MLXSW_TXHDR_ETCLASS_5,
+ MLXSW_TXHDR_ETCLASS_6,
+ MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+ MLXSW_TXHDR_RDQ_OTHER,
+ MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+ MLXSW_TXHDR_NOT_EMAD,
+ MLXSW_TXHDR_EMAD,
+};
+
+enum {
+ MLXSW_TXHDR_TYPE_DATA,
+ MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c28111749e1f..2d1b94274079 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8226,31 +8226,7 @@ static void s2io_rem_nic(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/**
- * s2io_starter - Entry point for the driver
- * Description: This function is the entry point for the driver. It verifies
- * the module loadable parameters and initializes PCI configuration space.
- */
-
-static int __init s2io_starter(void)
-{
- return pci_register_driver(&s2io_driver);
-}
-
-/**
- * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It
- * unregisters the driver.
- */
-
-static __exit void s2io_closer(void)
-{
- pci_unregister_driver(&s2io_driver);
- DBG_PRINT(INIT_DBG, "cleanup done\n");
-}
-
-module_init(s2io_starter);
-module_exit(s2io_closer);
+module_pci_driver(s2io_driver);
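module_pci_driver() absorbs the init/exit boilerplate deleted above. It expands to roughly the following (a sketch of the macro from include/linux/pci.h, which builds on module_driver()):

	static int __init s2io_driver_init(void)
	{
		return pci_register_driver(&s2io_driver);
	}
	module_init(s2io_driver_init);

	static void __exit s2io_driver_exit(void)
	{
		pci_unregister_driver(&s2io_driver);
	}
	module_exit(s2io_driver_exit);

The only behavioral difference from the removed code is the lost DBG_PRINT() on cleanup.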
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
struct tcphdr **tcp, struct RxD_t *rxdp,
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d89b6ed82c51..6c5997dc8afc 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
-static int s2io_starter(void);
-static void s2io_closer(void);
static void s2io_tx_watchdog(struct net_device *dev);
static void s2io_set_multicast(struct net_device *dev);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 055f3763e577..06bcc734fe8d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -24,9 +24,7 @@
#include <linux/mii.h>
#include <linux/timer.h>
#include <linux/irq.h>
-
#include <linux/vmalloc.h>
-
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
@@ -39,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 62
-#define QLCNIC_LINUX_VERSIONID "5.3.62"
+#define _QLCNIC_LINUX_SUBVERSION 63
+#define QLCNIC_LINUX_VERSIONID "5.3.63"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -926,6 +924,7 @@ struct qlcnic_mac_vlan_list {
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP BIT_13
#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
@@ -2291,8 +2290,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
-#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30 0x8C30
#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
@@ -2319,7 +2319,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
(device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
(device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2335,7 +2336,8 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2351,7 +2353,8 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
}
static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 840bf36b5e9d..5ab3adf88166 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -5,14 +5,15 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
@@ -118,6 +119,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
{QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+ {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -3513,6 +3515,31 @@ out:
qlcnic_free_mbx_args(&cmd);
}
+#define QLCNIC_83XX_ADD_PORT0 BIT_0
+#define QLCNIC_83XX_ADD_PORT1 BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+ cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+ cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "failed to issue extend iSCSI minidump capability\n");
+
+ return err;
+}
+
int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
{
u32 major, minor, sub;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 69f828eb42cf..331ae2c20f40 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/etherdevice.h>
+
#include "qlcnic_hw.h"
#define QLCNIC_83XX_BAR0_LENGTH 0x4000
@@ -626,6 +627,7 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 753ea8bad953..bf892160dd5f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1384,7 +1384,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
size_t size;
u64 addr;
- temp = kzalloc(fw->size, GFP_KERNEL);
+ temp = vzalloc(fw->size);
if (!temp) {
release_firmware(fw);
fw_info->fw = NULL;
@@ -1430,7 +1430,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
exit:
release_firmware(fw);
fw_info->fw = NULL;
- kfree(temp);
+ vfree(temp);
return ret;
}
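The kzalloc()-to-vzalloc() swap matters because fw->size covers the whole firmware image, which can be too large for a physically contiguous kmalloc allocation under memory pressure; the buffer is only touched by the CPU while staging the image, so virtually contiguous vmalloc memory suffices. The pattern in isolation, as a sketch:

	/* Sketch of the allocation pattern adopted above: pair vzalloc()
	 * with vfree(), never kfree(), for large CPU-only staging buffers.
	 */
	u8 *temp = vzalloc(fw->size);	/* fw->size may be several MB */
	if (!temp)
		return -ENOMEM;
	/* ... stage the firmware image into temp ... */
	vfree(temp);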
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 75ee9e4ced51..509b596cf1e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_hdr.h"
-
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bitops.h>
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
#define MASK(n) ((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index cbe2399c30a0..4bb33af8e2b3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -109,6 +109,7 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP 0x37
#define QLCNIC_INTRPT_INTX 1
#define QLCNIC_INTRPT_MSIX 3
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2f6cc423ab1d..8b08b20e8b30 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -7,11 +7,6 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
@@ -25,6 +20,10 @@
#include <net/vxlan.h>
#endif
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
@@ -111,8 +110,9 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
static const struct pci_device_id qlcnic_pci_tbl[] = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
- ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
@@ -1149,6 +1149,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -2403,7 +2404,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
qlcnic_free_tx_rings(adapter);
return -ENOMEM;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
spin_lock_init(&tx_ring->tx_clean_lock);
}
@@ -2492,6 +2492,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 332bb8a3f430..cda9e604a95f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <net/ip.h>
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
#include "qlcnic_83xx_hw.h"
#include "qlcnic_hw.h"
-#include <net/ip.h>
-
#define QLC_83XX_MINIDUMP_FLASH 0x520000
#define QLC_83XX_OCM_INDEX 3
#define QLC_83XX_PCI_INDEX 0
@@ -1388,27 +1388,60 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
netdev_info(adapter->netdev,
- "Dump data %d bytes captured, template header size %d bytes\n",
- fw_dump->size, fw_dump->tmpl_hdr_size);
+ "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+ fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+ fw_dump->tmpl_hdr);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
return 0;
}
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+ /* For special adapters (with 0x8830 device ID), where iSCSI firmware
+ * dump needs to be captured as part of the regular firmware dump
+ * collection process, firmware exports its capability through
+ * capability registers
+ */
+ return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+ (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
{
u32 prev_version, current_version;
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
struct pci_dev *pdev = adapter->pdev;
+ bool extended = false;
prev_version = adapter->fw_version;
current_version = qlcnic_83xx_get_fw_version(adapter);
if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
vfree(fw_dump->tmpl_hdr);
+
+ if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+ extended = !qlcnic_83xx_extend_md_capab(adapter);
+
if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+ /* Once we have the minidump template with extended iSCSI dump
+ * capability, update the minidump capture mask to 0x1f as
+ * per FW requirement
+ */
+ if (extended) {
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ hdr->drv_cap_mask = 0x1f;
+ fw_dump->cap_mask = 0x1f;
+ dev_info(&pdev->dev,
+ "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+ }
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 4677b2edccca..017d8c2c8285 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -8,10 +8,11 @@
#ifndef _QLCNIC_83XX_SRIOV_H_
#define _QLCNIC_83XX_SRIOV_H_
-#include "qlcnic.h"
#include <linux/types.h>
#include <linux/pci.h>
+#include "qlcnic.h"
+
extern const u32 qlcnic_83xx_reg_tbl[];
extern const u32 qlcnic_83xx_ext_reg_tbl[];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index e6312465fe45..546cd5f1c85a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -5,10 +5,11 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
-#include <linux/types.h>
#define QLC_BC_COMMAND 0
#define QLC_BC_RESPONSE 1
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index a29538b86edf..afd687e5e779 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -5,9 +5,10 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
-#include <linux/types.h>
#define QLCNIC_SRIOV_VF_MAX_MAC 7
#define QLC_VF_MIN_TX_RATE 100
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 05c28f2c6df7..ccbb04503b27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -7,10 +7,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -24,6 +20,9 @@
#include <linux/hwmon-sysfs.h>
#endif
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 8aa50ac4e2d6..a157aaaaff6a 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -658,6 +658,8 @@ struct ravb_desc {
__le32 dptr; /* Descriptor pointer */
};
+#define DPTR_ALIGN 4 /* Required descriptor pointer alignment */
+
enum DIE_DT {
/* Frame data */
DT_FMID = 0x40,
@@ -739,6 +741,7 @@ enum RAVB_QUEUE {
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
+#define NUM_TX_DESC 2 /* TX descriptors per packet */
struct ravb_tstamp_skb {
struct list_head list;
@@ -777,9 +780,9 @@ struct ravb_private {
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+ void *tx_align[NUM_TX_QUEUE];
struct sk_buff **rx_skb[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
- void **tx_buffers[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
struct net_device_stats stats[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 78849dd4ef8e..3d972d819420 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -195,12 +195,8 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
/* Free aligned TX buffers */
- if (priv->tx_buffers[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- kfree(priv->tx_buffers[q][i]);
- }
- kfree(priv->tx_buffers[q]);
- priv->tx_buffers[q] = NULL;
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
if (priv->tx_ring[q]) {
ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] + 1);
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
@@ -223,11 +219,12 @@ static void ravb_ring_free(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc = NULL;
- struct ravb_tx_desc *tx_desc = NULL;
- struct ravb_desc *desc = NULL;
+ struct ravb_ex_rx_desc *rx_desc;
+ struct ravb_tx_desc *tx_desc;
+ struct ravb_desc *desc;
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+ int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+ NUM_TX_DESC;
dma_addr_t dma_addr;
int i;
@@ -260,11 +257,12 @@ static void ravb_ring_format(struct net_device *ndev, int q)
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
- for (i = 0; i < priv->num_tx_ring[q]; i++) {
- tx_desc = &priv->tx_ring[q][i];
+ for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+ i++, tx_desc++) {
+ tx_desc->die_dt = DT_EEMPTY;
+ tx_desc++;
tx_desc->die_dt = DT_EEMPTY;
}
- tx_desc = &priv->tx_ring[q][i];
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -285,7 +283,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
struct ravb_private *priv = netdev_priv(ndev);
struct sk_buff *skb;
int ring_size;
- void *buffer;
int i;
/* Allocate RX and TX skb rings */
@@ -305,19 +302,11 @@ static int ravb_ring_init(struct net_device *ndev, int q)
}
/* Allocate rings for the aligned buffers */
- priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
- sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
- if (!priv->tx_buffers[q])
+ priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+ DPTR_ALIGN - 1, GFP_KERNEL);
+ if (!priv->tx_align[q])
goto error;
- for (i = 0; i < priv->num_tx_ring[q]; i++) {
- buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
- if (!buffer)
- goto error;
- /* Aligned TX buffer */
- priv->tx_buffers[q][i] = buffer;
- }
-
/* Allocate all RX descriptors. */
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -329,7 +318,8 @@ static int ravb_ring_init(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
/* Allocate all TX descriptors. */
- ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
&priv->tx_desc_dma[q],
GFP_KERNEL);
@@ -439,11 +429,12 @@ static int ravb_tx_free(struct net_device *ndev, int q)
struct net_device_stats *stats = &priv->stats[q];
struct ravb_tx_desc *desc;
int free_num = 0;
- int entry = 0;
+ int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
desc = &priv->tx_ring[q][entry];
if (desc->die_dt != DT_FEMPTY)
break;
@@ -451,14 +442,18 @@ static int ravb_tx_free(struct net_device *ndev, int q)
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
- if (priv->tx_skb[q][entry]) {
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ stats->tx_packets++;
+ }
free_num++;
}
- stats->tx_packets++;
stats->tx_bytes += size;
desc->die_dt = DT_EEMPTY;
}
@@ -512,8 +507,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
- u16 pkt_len = 0;
u8 desc_status;
+ u16 pkt_len;
int limit;
boguscnt = min(boguscnt, *quota);
@@ -1277,44 +1272,60 @@ static void ravb_tx_timeout_work(struct work_struct *work)
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_tstamp_skb *ts_skb = NULL;
u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
unsigned long flags;
u32 dma_addr;
void *buffer;
u32 entry;
+ u32 len;
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+ if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+ NUM_TX_DESC) {
netif_err(priv, tx_queued, ndev,
"still transmitting with the full ring!\n");
netif_stop_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_BUSY;
}
- entry = priv->cur_tx[q] % priv->num_tx_ring[q];
- priv->tx_skb[q][entry] = skb;
+ entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+ priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
if (skb_put_padto(skb, ETH_ZLEN))
goto drop;
- buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
- memcpy(buffer, skb->data, skb->len);
- desc = &priv->tx_ring[q][entry];
- desc->ds_tagl = cpu_to_le16(skb->len);
- dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / NUM_TX_DESC * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr))
goto drop;
+
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(len);
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ buffer = skb->data + len;
+ len = skb->len - len;
+ dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ goto unmap;
+
+ desc++;
+ desc->ds_tagl = cpu_to_le16(len);
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
if (q == RAVB_NC) {
ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
if (!ts_skb) {
- dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+ desc--;
+ dma_unmap_single(&ndev->dev, dma_addr, len,
DMA_TO_DEVICE);
- goto drop;
+ goto unmap;
}
ts_skb->skb = skb;
ts_skb->tag = priv->ts_skb_tag++;
@@ -1330,13 +1341,15 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Descriptor type must be set after all the above writes */
dma_wmb();
- desc->die_dt = DT_FSINGLE;
+ desc->die_dt = DT_FEND;
+ desc--;
+ desc->die_dt = DT_FSTART;
ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
- priv->cur_tx[q]++;
- if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
- !ravb_tx_free(ndev, q))
+ priv->cur_tx[q] += NUM_TX_DESC;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
netif_stop_subqueue(ndev, q);
exit:
@@ -1344,9 +1357,12 @@ exit:
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
+unmap:
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
dev_kfree_skb_any(skb);
- priv->tx_skb[q][entry] = NULL;
+ priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
goto exit;
}
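Each transmitted packet now consumes NUM_TX_DESC (two) descriptors: the first points into a DPTR_ALIGN-byte per-packet slot in tx_align[q], where the unaligned head of skb->data (at most 3 bytes) is copied so the descriptor pointer satisfies the 4-byte alignment rule; the second maps the now-aligned remainder in place. A worked sketch of the split arithmetic used in ravb_start_xmit() above:

	/* With DPTR_ALIGN == 4, skb->data == ...0x1002, skb->len == 66:
	 *   head = PTR_ALIGN(skb->data, 4) - skb->data = 2 bytes (copied)
	 *   tail = skb->len - head = 64 bytes (DMA-mapped in place)
	 * head is 0 for an already-aligned skb, making the memcpy a no-op.
	 */
	u32 head = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
	u32 tail = skb->len - head;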
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2e7f9a2834be..cf9c0b241d07 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -202,6 +202,7 @@ enum {
ROCKER_CTRL_IPV4_MCAST,
ROCKER_CTRL_IPV6_MCAST,
ROCKER_CTRL_DFLT_BRIDGING,
+ ROCKER_CTRL_DFLT_OVS,
ROCKER_CTRL_MAX,
};
@@ -321,9 +322,21 @@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
return ntohs(vlan_id);
}
+static bool rocker_port_is_slave(const struct rocker_port *rocker_port,
+ const char *kind)
+{
+ return rocker_port->bridge_dev &&
+ !strcmp(rocker_port->bridge_dev->rtnl_link_ops->kind, kind);
+}
+
static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
- return !!rocker_port->bridge_dev;
+ return rocker_port_is_slave(rocker_port, "bridge");
+}
+
+static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
+{
+ return rocker_port_is_slave(rocker_port, "openvswitch");
}
#define ROCKER_OP_FLAG_REMOVE BIT(0)
@@ -1818,6 +1831,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
}
static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ int mtu = *(int *)priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+ mtu))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
@@ -1874,6 +1911,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
macaddr, NULL, NULL);
}
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+ int mtu)
+{
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ rocker_cmd_set_port_settings_mtu_prep,
+ &mtu, NULL, NULL);
+}
+
static int rocker_port_set_learning(struct rocker_port *rocker_port,
enum switchdev_trans trans)
{
@@ -3243,6 +3288,12 @@ static struct rocker_ctrl {
.bridge = true,
.copy_to_cpu = true,
},
+ [ROCKER_CTRL_DFLT_OVS] = {
+ /* pass all pkts up to CPU */
+ .eth_dst = zero_mac,
+ .eth_dst_mask = zero_mac,
+ .acl = true,
+ },
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
@@ -3755,11 +3806,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
break;
case BR_STATE_LEARNING:
case BR_STATE_FORWARDING:
- want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+ if (!rocker_port_is_ovsed(rocker_port))
+ want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
want[ROCKER_CTRL_IPV4_MCAST] = true;
want[ROCKER_CTRL_IPV6_MCAST] = true;
if (rocker_port_is_bridged(rocker_port))
want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+ else if (rocker_port_is_ovsed(rocker_port))
+ want[ROCKER_CTRL_DFLT_OVS] = true;
else
want[ROCKER_CTRL_LOCAL_ARP] = true;
break;
@@ -3983,7 +4037,8 @@ static int rocker_port_open(struct net_device *dev)
napi_enable(&rocker_port->napi_tx);
napi_enable(&rocker_port->napi_rx);
- rocker_port_set_enable(rocker_port, true);
+ if (!dev->proto_down)
+ rocker_port_set_enable(rocker_port, true);
netif_start_queue(dev);
return 0;
@@ -4102,8 +4157,11 @@ static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data, skb_headlen(skb));
if (err)
goto nest_cancel;
- if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
- goto nest_cancel;
+ if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
+ err = skb_linearize(skb);
+ if (err)
+ goto unmap_frags;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -4152,6 +4210,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int running = netif_running(dev);
+ int err;
+
+#define ROCKER_PORT_MIN_MTU 68
+#define ROCKER_PORT_MAX_MTU 9000
+
+ if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+ return -EINVAL;
+
+ if (running)
+ rocker_port_stop(dev);
+
+ netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+ if (err)
+ return err;
+
+ if (running)
+ err = rocker_port_open(dev);
+
+ return err;
+}
+
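rocker_port_change_mtu() stops a running port before pushing the new MTU to hardware through the SET_PORT_SETTINGS/MTU TLV added earlier in this patch, then reopens it, so a live port sees a short outage. From userspace this is the usual (port name hypothetical; the 68..9000 range is enforced above):

	ip link set dev sw1p1 mtu 4000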
static int rocker_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
@@ -4167,11 +4253,23 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
return err ? -EOPNOTSUPP : 0;
}
+static int rocker_port_change_proto_down(struct net_device *dev,
+ bool proto_down)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ if (rocker_port->dev->flags & IFF_UP)
+ rocker_port_set_enable(rocker_port, !proto_down);
+ rocker_port->dev->proto_down = proto_down;
+ return 0;
+}
+
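The new ndo_change_proto_down hook lets userspace administratively disable the port's data path while the netdev itself stays up; rocker_port_open() above honours it by skipping rocker_port_set_enable() when dev->proto_down is set. The flag is driven through the IFLA_PROTO_DOWN netlink attribute, e.g. with a recent iproute2 (the port name is hypothetical):

	ip link set dev sw1p1 protodown on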
static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_open = rocker_port_open,
.ndo_stop = rocker_port_stop,
.ndo_start_xmit = rocker_port_xmit,
.ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_change_mtu = rocker_port_change_mtu,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
@@ -4179,6 +4277,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_fdb_del = switchdev_port_fdb_del,
.ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+ .ndo_change_proto_down = rocker_port_change_proto_down,
};
/********************
@@ -4445,6 +4544,7 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
if (found->key.pport != rocker_port->pport)
continue;
fdb->addr = found->key.addr;
+ fdb->ndm_state = NUD_REACHABLE;
fdb->vid = rocker_port_vlan_to_vid(rocker_port,
found->key.vlan_id);
err = obj->cb(rocker_port->dev, obj);
@@ -4726,6 +4826,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
size_t rx_len;
+ u16 rx_flags = 0;
if (!skb)
return -ENOENT;
@@ -4733,6 +4834,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
return -EINVAL;
+ if (attrs[ROCKER_TLV_RX_FLAGS])
+ rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
rocker_dma_rx_ring_skb_unmap(rocker, attrs);
@@ -4740,6 +4843,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
skb_put(skb, rx_len);
skb->protocol = eth_type_trans(skb, rocker_port->dev);
+ if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+ skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
rocker_port->dev->stats.rx_packets++;
rocker_port->dev->stats.rx_bytes += skb->len;
@@ -4869,7 +4975,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
err = register_netdev(dev);
if (err) {
@@ -4878,11 +4984,13 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
}
rocker->ports[port_number] = rocker_port;
+ switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
+
rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err) {
- dev_err(&pdev->dev, "install ig port table failed\n");
+ netdev_err(rocker_port->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
}
@@ -4902,6 +5010,7 @@ err_untagged_vlan:
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
+ rocker->ports[port_number] = NULL;
unregister_netdev(dev);
err_register_netdev:
free_netdev(dev);
@@ -5157,6 +5266,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
rocker_port->bridge_dev = bridge;
+ switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
untagged_vid, 0);
@@ -5177,6 +5287,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
+ switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
+ false);
rocker_port->bridge_dev = NULL;
err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
@@ -5191,23 +5303,39 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
return err;
}
+
+static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
+ struct net_device *master)
+{
+ int err;
+
+ rocker_port->bridge_dev = master;
+
+ err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ if (err)
+ return err;
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+
+ return err;
+}
+
static int rocker_port_master_changed(struct net_device *dev)
{
struct rocker_port *rocker_port = netdev_priv(dev);
struct net_device *master = netdev_master_upper_dev_get(dev);
int err = 0;
- /* There are currently three cases handled here:
- * 1. Joining a bridge
- * 2. Leaving a previously joined bridge
- * 3. Other, e.g. being added to or removed from a bond or openvswitch,
- * in which case nothing is done
- */
- if (master && master->rtnl_link_ops &&
- !strcmp(master->rtnl_link_ops->kind, "bridge"))
- err = rocker_port_bridge_join(rocker_port, master);
- else if (rocker_port_is_bridged(rocker_port))
+ /* N.B.: Do nothing if the type of master is not supported */
+ if (master && master->rtnl_link_ops) {
+ if (!strcmp(master->rtnl_link_ops->kind, "bridge"))
+ err = rocker_port_bridge_join(rocker_port, master);
+ else if (!strcmp(master->rtnl_link_ops->kind, "openvswitch"))
+ err = rocker_port_ovs_changed(rocker_port, master);
+ } else if (rocker_port_is_bridged(rocker_port)) {
err = rocker_port_bridge_leave(rocker_port);
+ } else if (rocker_port_is_ovsed(rocker_port)) {
+ err = rocker_port_ovs_changed(rocker_port, NULL);
+ }
return err;
}
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index c61fbf968036..12490b2f6504 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -159,6 +159,7 @@ enum {
ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */
__ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -245,6 +246,7 @@ enum {
#define ROCKER_RX_FLAGS_TCP BIT(5)
#define ROCKER_RX_FLAGS_UDP BIT(6)
#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8)
enum {
ROCKER_TLV_TX_UNSPEC,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 605cc8948594..06b8061f1b42 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -49,6 +49,12 @@ enum {
*/
#define HUNT_FILTER_TBL_ROWS 8192
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+struct efx_ef10_dev_addr {
+ u8 addr[ETH_ALEN];
+ u16 id;
+};
+
struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
enum efx_filter_match_flags rx_match_flags[
@@ -69,13 +75,14 @@ struct efx_ef10_filter_table {
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_DEV_UC_MAX 32
#define EFX_EF10_FILTER_DEV_MC_MAX 256
- struct {
- u8 addr[ETH_ALEN];
- u16 id;
- } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
- dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
- int dev_uc_count; /* negative for PROMISC */
- int dev_mc_count; /* negative for PROMISC/ALLMULTI */
+ struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+ struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count;
+ int dev_mc_count;
+/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
+ u16 ucdef_id;
+ u16 bcast_id;
+ u16 mcdef_id;
};
/* An arbitrary search limit for the software hash table */
@@ -387,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
* First try to enable it, then if we get EPERM, just
* ask if it's already enabled
*/
- rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
if (rc == 0) {
nic_data->workaround_35388 = true;
} else if (rc == -EPERM) {
@@ -984,12 +991,24 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+#ifdef CONFIG_SFC_SRIOV
+ unsigned int i;
+#endif
/* All our allocations have been reset */
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* Driver-created vswitches and vports must be re-created */
+ nic_data->must_probe_vswitching = true;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+#ifdef CONFIG_SFC_SRIOV
+ if (nic_data->vf)
+ for (i = 0; i < efx->vf_count; i++)
+ nic_data->vf[i].vport_id = 0;
+#endif
}
static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
@@ -1034,6 +1053,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
int rc = efx_mcdi_reset(efx, reset_type);
+ /* Unprivileged functions return -EPERM, but need to return success
+ * here so that the datapath is brought back up.
+ */
+ if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
+ rc = 0;
+
/* If it was a port reset, trigger reallocation of MC resources.
* Note that on an MC reset nothing needs to be done now because we'll
* detect the MC reset later and handle it then.
@@ -1558,10 +1583,6 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
efx_ef10_reset_mc_allocations(efx);
- /* Driver-created vswitches and vports must be re-created */
- nic_data->must_probe_vswitching = true;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
-
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -2197,6 +2218,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel)
GFP_KERNEL);
}
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
static int efx_ef10_ev_init(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf,
@@ -2208,6 +2252,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
struct efx_ef10_nic_data *nic_data;
bool supports_rx_merge;
size_t inlen, outlen;
+ unsigned int enabled, implemented;
dma_addr_t dma_addr;
int rc;
int i;
@@ -2248,30 +2293,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
/* IRQ return is ignored */
- return rc;
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = channel->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
+ if (channel->channel || rc)
+ return rc;
- if (rc && rc != -EALREADY)
+ /* Successfully created event queue on channel 0 */
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+ if (rc == -ENOSYS) {
+ /* GET_WORKAROUNDS was implemented before the bug26807
+ * workaround, thus the latter must be unavailable in this fw
+ */
+ nic_data->workaround_26807 = false;
+ rc = 0;
+ } else if (rc) {
goto fail;
+ } else {
+ nic_data->workaround_26807 =
+ !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+ if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
+ !nic_data->workaround_26807) {
+ unsigned int flags;
+
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG26807,
+ true, &flags);
+
+ if (!rc) {
+ if (flags &
+ 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+ netif_info(efx, drv, efx->net_dev,
+ "other functions on NIC have been reset\n");
+ /* MC's boot count has incremented */
+ ++nic_data->warm_boot_count;
+ }
+ nic_data->workaround_26807 = true;
+ } else if (rc == -EPERM) {
+ rc = 0;
+ }
+ }
+ }
- return;
+ if (!rc)
+ return 0;
fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
- outbuf, outlen, rc);
+ efx_ef10_ev_fini(channel);
+ return rc;
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -3225,6 +3292,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
filter_id, false);
}
+static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
+{
+ return filter_id % HUNT_FILTER_TBL_ROWS;
+}
+
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, true);
+}
+
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec)
@@ -3598,6 +3678,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
goto fail;
}
+ table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+
efx->filter_state = table;
init_waitqueue_head(&table->waitq);
return 0;
@@ -3700,145 +3784,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
+ if (id != EFX_EF10_FILTER_ID_INVALID) { \
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+ WARN_ON(!table->entry[filter_idx].spec); \
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
+ }
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct efx_filter_spec spec;
- bool remove_failed = false;
- struct netdev_hw_addr *uc;
- struct netdev_hw_addr *mc;
- unsigned int filter_idx;
- int i, n, rc;
-
- if (!efx_dev_registered(efx))
- return;
+ unsigned int filter_idx, i;
if (!table)
return;
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
- n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
- for (i = 0; i < n; i++) {
- filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- }
- n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
- for (i = 0; i < n; i++) {
- filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- }
+ for (i = 0; i < table->dev_uc_count; i++)
+ EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+ for (i = 0; i < table->dev_mc_count; i++)
+ EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
spin_unlock_bh(&efx->filter_lock);
+}
+#undef EFX_EF10_FILTER_DO_MARK_OLD
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
- */
- netif_addr_lock_bh(net_dev);
- if (net_dev->flags & IFF_PROMISC ||
- netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
- table->dev_uc_count = -1;
- } else {
- table->dev_uc_count = 1 + netdev_uc_count(net_dev);
- ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
- i = 1;
- netdev_for_each_uc_addr(uc, net_dev) {
- ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- i++;
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *uc;
+ int addr_count;
+ unsigned int i;
+
+ table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+ addr_count = netdev_uc_count(net_dev);
+ if (net_dev->flags & IFF_PROMISC)
+ *promisc = true;
+ table->dev_uc_count = 1 + addr_count;
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ *promisc = true;
+ break;
}
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+ table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+ i++;
}
- if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
- netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
- table->dev_mc_count = -1;
- } else {
- table->dev_mc_count = 1 + netdev_mc_count(net_dev);
- eth_broadcast_addr(table->dev_mc_list[0].addr);
- i = 1;
- netdev_for_each_mc_addr(mc, net_dev) {
- ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- i++;
+}
+
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *mc;
+ unsigned int i, addr_count;
+
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+ *promisc = true;
+
+ addr_count = netdev_mc_count(net_dev);
+ i = 0;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ *promisc = true;
+ break;
}
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+ table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+ i++;
}
- netif_addr_unlock_bh(net_dev);
- /* Insert/renew unicast filters */
- if (table->dev_uc_count >= 0) {
- for (i = 0; i < table->dev_uc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->dev_uc_list[i].addr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
- if (rc < 0) {
- /* Fall back to unicast-promisc */
- while (i--)
- efx_ef10_filter_remove_safe(
+ table->dev_mc_count = i;
+}
+
+static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
+ bool multicast, bool rollback)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_dev_addr *addr_list;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ unsigned int i, j;
+ int addr_count;
+ int rc;
+
+ if (multicast) {
+ addr_list = table->dev_mc_list;
+ addr_count = table->dev_mc_count;
+ } else {
+ addr_list = table->dev_uc_list;
+ addr_count = table->dev_uc_count;
+ }
+
+ /* Insert/renew filters */
+ for (i = 0; i < addr_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ addr_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ if (rollback) {
+ netif_info(efx, drv, efx->net_dev,
+ "efx_ef10_filter_insert failed rc=%d\n",
+ rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+ continue;
+ efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- table->dev_uc_list[i].id);
- table->dev_uc_count = -1;
- break;
+ addr_list[j].id);
+ addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ /* mark as not inserted, and carry on */
+ rc = EFX_EF10_FILTER_ID_INVALID;
}
- table->dev_uc_list[i].id = rc;
}
+ addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
- if (table->dev_uc_count < 0) {
+
+ if (multicast && rollback) {
+ /* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
EFX_FILTER_FLAG_RX_RSS,
0);
- efx_filter_set_uc_def(&spec);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- WARN_ON(1);
- table->dev_uc_count = 0;
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n", rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+ continue;
+ efx_ef10_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ addr_list[j].id);
+ addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
} else {
- table->dev_uc_list[0].id = rc;
+ table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
- /* Insert/renew multicast filters */
- if (table->dev_mc_count >= 0) {
- for (i = 0; i < table->dev_mc_count; i++) {
+ return 0;
+}
+
+static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
+ bool rollback)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ int rc;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+
+ if (multicast)
+ efx_filter_set_mc_def(&spec);
+ else
+ efx_filter_set_uc_def(&spec);
+
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "%scast mismatch filter insert failed rc=%d\n",
+ multicast ? "Multi" : "Uni", rc);
+ } else if (multicast) {
+ table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ if (!nic_data->workaround_26807) {
+ /* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
EFX_FILTER_FLAG_RX_RSS,
0);
+ eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->dev_mc_list[i].addr);
+ baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- /* Fall back to multicast-promisc */
- while (i--)
- efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_AUTO,
- table->dev_mc_list[i].id);
- table->dev_mc_count = -1;
- break;
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n",
+ rc);
+ if (rollback) {
+ /* Roll back the mc_def filter */
+ efx_ef10_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ table->mcdef_id);
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ return rc;
+ }
+ } else {
+ table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
- table->dev_mc_list[i].id = rc;
- }
- }
- if (table->dev_mc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
- efx_filter_set_mc_def(&spec);
- rc = efx_ef10_filter_insert(efx, &spec, true);
- if (rc < 0) {
- WARN_ON(1);
- table->dev_mc_count = 0;
- } else {
- table->dev_mc_list[0].id = rc;
}
+ rc = 0;
+ } else {
+ table->ucdef_id = rc;
+ rc = 0;
}
+ return rc;
+}
+
+/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
+ * flag or removes these filters, we don't need to hold the filter_lock while
+ * scanning for these filters.
+ */
+static void efx_ef10_filter_remove_old(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ bool remove_failed = false;
+ int i;
- /* Remove filters that weren't renewed. Since nothing else
- * changes the AUTO_OLD flag or removes these filters, we
- * don't need to hold the filter_lock while scanning for
- * these filters.
- */
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -3917,6 +4089,87 @@ reset_nic:
return rc ? rc : rc2;
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ bool uc_promisc = false, mc_promisc = false;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_ef10_filter_mark_old(efx);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
+ efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (uc_promisc) {
+ efx_ef10_filter_insert_def(efx, false, false);
+ efx_ef10_filter_insert_addr_list(efx, false, false);
+ } else {
+ /* If any of the filters failed to insert, fall back to
+ * promiscuous mode - add in the uc_def filter. But keep
+ * our individual unicast filters.
+ */
+ if (efx_ef10_filter_insert_addr_list(efx, false, false))
+ efx_ef10_filter_insert_def(efx, false, false);
+ }
+
+ /* Insert/renew multicast filters */
+ /* If changing promiscuous state with cascaded multicast filters, remove
+ * old filters first, so that packets are dropped rather than duplicated
+ */
+ if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+ efx_ef10_filter_remove_old(efx);
+ if (mc_promisc) {
+ if (nic_data->workaround_26807) {
+ /* If we failed to insert promiscuous filters, rollback
+ * and fall back to individual multicast filters
+ */
+ if (efx_ef10_filter_insert_def(efx, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ efx_ef10_filter_remove_old(efx);
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ } else {
+ /* If we failed to insert promiscuous filters, don't
+ * rollback. Regardless, also insert the mc_list
+ */
+ efx_ef10_filter_insert_def(efx, true, false);
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ } else {
+ /* If any filters failed to insert, rollback and fall back to
+ * promiscuous mode - mc_def filter and maybe broadcast. If
+ * that fails, roll back again and insert as many of our
+ * individual multicast filters as we can.
+ */
+ if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ if (nic_data->workaround_26807)
+ efx_ef10_filter_remove_old(efx);
+ if (efx_ef10_filter_insert_def(efx, true, true))
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ }
+
+ efx_ef10_filter_remove_old(efx);
+ efx->mc_promisc = mc_promisc;
+}
+
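Taken together, the refactored sync is a mark-and-sweep renewal over the hardware filter table. Condensed to its skeleton (same functions as above; the promiscuous fallback branches are elided):

	efx_ef10_filter_mark_old(efx);		/* flag all owned filters AUTO_OLD */
	netif_addr_lock_bh(net_dev);
	efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
	efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
	netif_addr_unlock_bh(net_dev);
	/* the insert/renew paths clear AUTO_OLD on every filter still wanted */
	efx_ef10_filter_remove_old(efx);	/* sweep whatever was not renewed */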
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -4085,6 +4338,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
out:
+ if (rc == -EPERM)
+ rc = 0;
rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
return rc ? rc : rc2;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81640f8bb811..98d172b04f71 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1779,15 +1779,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
return rc;
}
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+ unsigned int *flags)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
- return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (!flags)
+ return 0;
+
+ if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flags = 0;
+
+ return 0;
}
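Existing callers that don't need the extended output now pass NULL for flags, as the bug35388 call in ef10.c does; callers that do can check the WORKAROUND_EXT_OUT bits. A sketch mirroring the bug26807 usage in ef10.c above:

	unsigned int flags;

	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG26807, true, &flags);
	if (!rc && (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)))
		++nic_data->warm_boot_count;	/* other functions were FLR'd */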
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
@@ -1816,7 +1832,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
return 0;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ /* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+ * terrifying. The call site will have to deal with it though.
+ */
+ netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
+ efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 1838afe2da92..025d504c472b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+ unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 45fca9fc66b7..4cc772164a79 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -26,6 +26,10 @@
* Unlike a warm boot, assume DMEM has been reloaded, so that
* the MC persistent data must be reinitialised. */
#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
/* BIST state has been initialized */
#define MC_FW_BIST_INIT_OK (128)
@@ -169,6 +173,8 @@
#define MC_CMD_ERR_EINTR 4
/* I/O failure */
#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
/* Try again */
#define MC_CMD_ERR_EAGAIN 11
/* Out of memory */
@@ -181,6 +187,10 @@
#define MC_CMD_ERR_ENODEV 19
/* Invalid argument to target */
#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
/* Out of range */
#define MC_CMD_ERR_ERANGE 34
/* Non-recursive resource is already acquired */
@@ -226,6 +236,43 @@
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
/* The datapath is disabled. */
#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+ only ever been seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The drvier should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such a case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
#define MC_CMD_ERR_CODE_OFST 0
@@ -275,6 +322,11 @@
MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
(n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
/* Version 2 adds an optional argument to error returns: the errno value
* may be followed by the (0-based) number of the first argument that
@@ -394,6 +446,8 @@
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
/* enum: DDR ECC status update */
#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -408,6 +462,16 @@
#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -416,6 +480,8 @@
#define MCDI_EVENT_EV_CODE_WIDTH 4
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
/* enum: Bad assert. */
#define MCDI_EVENT_CODE_BADSSERT 0x1
/* enum: PM Notice. */
@@ -470,6 +536,14 @@
#define MCDI_EVENT_CODE_MC_BIST 0x19
/* enum: PTP tick event providing current NIC time */
#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -537,6 +611,33 @@
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
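A hedged sketch of how a driver might decode these PROXY_RESPONSE fields from a raw 64-bit event, treating each LBN/WIDTH pair as a bit offset and width; resend_request() and fail_request() are hypothetical driver helpers:

#include <stdint.h>

/* Extract a field from a raw event given its LBN/WIDTH pair. */
#define MCDI_EV_FIELD(ev, lbn, width) \
	((uint32_t)(((ev) >> (lbn)) & ((1ULL << (width)) - 1)))

void resend_request(uint32_t handle);          /* hypothetical */
void fail_request(uint32_t handle, int rc);    /* hypothetical */

static void handle_proxy_response(uint64_t ev)
{
	uint32_t handle = MCDI_EV_FIELD(ev, MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN,
					MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH);
	uint32_t rc = MCDI_EV_FIELD(ev, MCDI_EVENT_PROXY_RESPONSE_RC_LBN,
				    MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH);

	if (rc == 0)
		resend_request(handle);   /* authorized: resend the command */
	else
		fail_request(handle, rc); /* denied; rc is typically EPERM */
}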
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -581,6 +682,10 @@
#define FCDI_EVENT_CODE_PTP_TICK 0x7
/* enum: ECC error counters */
#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port ID config mapping an MC port index to an FC port index */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -594,11 +699,24 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid on a normal FCDI event
@@ -631,6 +749,90 @@
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
/***********************************/
/* MC_CMD_READ32
@@ -687,24 +889,34 @@
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-/* enum: The main image should be entered via a copy of a single word from and
- * to this address when none of the other magic behaviours are required.
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
*/
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to start the datapath CPUs.
- * This is useful for certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
*/
#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to parse any configuration from
- * flash. (In addition, the datapath CPUs will not be started, as for
- * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
- * certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
*/
#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
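For illustration, a sketch of composing a SRC_ADDR value from the bitfield above, equivalent (per the deprecation notes) to the old HUNT_IGNORE_CONFIG magic address; assumes the definitions above are in scope:

#include <stdint.h>

/* Boot with BOOT_MAGIC_PRESENT set, satellite CPUs not loaded and flash
 * configuration ignored.
 */
static uint32_t copycode_src_addr_ignore_config(void)
{
	return (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN) |
	       (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN) |
	       (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN);
}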
@@ -795,6 +1007,10 @@
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
@@ -802,7 +1018,8 @@
/***********************************/
/* MC_CMD_LOG_CTRL
- * Configure the output stream for various events and messages.
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
*/
#define MC_CMD_LOG_CTRL 0x7
@@ -816,6 +1033,7 @@
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -955,8 +1173,12 @@
* input on the same NIC.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1b
+#define MC_CMD_PTP_OP_MAX 0x1c
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -1191,8 +1413,12 @@
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
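A sketch of how the extended QUEUE word might be composed, packing the event queue ID into bits 0-15 and setting the sync-status reporting flag in bit 31:

#include <stdint.h>

static uint32_t ptp_subscribe_queue_word(uint16_t evq_id)
{
	return ((uint32_t)evq_id << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN) |
	       (1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN);
}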
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
@@ -1214,6 +1440,23 @@
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
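A minimal sketch of filling the 24-byte request above, declaring the clocks in sync for the next 60 seconds; the CMD word at offset 0 is the MC_CMD_PTP_IN_CMD_OFST field defined earlier in this header, and MCDI payloads are assumed little-endian:

#include <stdint.h>
#include <string.h>

/* Little-endian 32-bit store helper. */
static void le32_put(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static void build_set_sync_status(uint8_t buf[MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN])
{
	memset(buf, 0, MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN);
	le32_put(buf + 0, MC_CMD_PTP_OP_SET_SYNC_STATUS);	/* MC_CMD_PTP_IN_CMD_OFST */
	le32_put(buf + MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST,
		 MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC);
	le32_put(buf + MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST, 60);
}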
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1375,7 +1618,7 @@
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
/* Time format required/used by this NIC. Applies to all PTP MCDI
* operations that pass times between the host and firmware. If this operation
* is not supported (older firmware) a format of seconds and nanoseconds should
@@ -1396,6 +1639,13 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
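A sketch of testing the REPORT_SYNC_STATUS capability bit in an extended GET_ATTRIBUTES response, falling back gracefully for older firmware that returns the short 8-byte form:

#include <stddef.h>
#include <stdint.h>

/* Little-endian 32-bit load helper. */
static uint32_t le32_get(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Returns non-zero if the firmware can report PTP sync status. */
static int ptp_reports_sync_status(const uint8_t *resp, size_t resp_len)
{
	if (resp_len < MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
		return 0;	/* older firmware: short response, no capability word */
	return (le32_get(resp + MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
		>> MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN) & 1;
}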
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
@@ -1415,6 +1665,9 @@
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -1915,6 +2168,14 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
/* enum: Only this option is allowed for non-admin functions */
#define MC_CMD_FW_DONT_CARE 0xffffffff
@@ -2481,6 +2742,12 @@
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
/* enum: Near side of AOE Siena side port */
#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2552,12 +2819,8 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
-/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
-/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
@@ -2632,7 +2895,7 @@
#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
/* MC_CMD_SET_MAC_IN msgrequest */
-#define MC_CMD_SET_MAC_IN_LEN 24
+#define MC_CMD_SET_MAC_IN_LEN 28
/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
* EtherII, VLAN, bug16011 padding).
*/
@@ -2649,13 +2912,20 @@
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
/* enum: Flow control is off. */
-/* MC_CMD_FCNTL_OFF 0x0 */
+#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
-/* MC_CMD_FCNTL_RESPOND 0x1 */
+#define MC_CMD_FCNTL_RESPOND 0x1
/* enum: Respond to and Issue flow control. */
-/* MC_CMD_FCNTL_BIDIR 0x2 */
+#define MC_CMD_FCNTL_BIDIR 0x2
/* enum: Auto neg flow control. */
#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -2748,7 +3018,8 @@
* guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
* performed, and the statistics may be read from the message response. If
 * DMA_ADDR != 0, then the statistics are DMAed to that page-aligned location.
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
*/
#define MC_CMD_MAC_STATS 0x2e
@@ -2791,6 +3062,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
@@ -2890,8 +3162,8 @@
* PM_AND_RXDP_COUNTERS capability only.
*/
#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an HLB descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
@@ -3213,6 +3485,8 @@
#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
/* enum: FC Log. */
#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
/***********************************/
@@ -3407,6 +3681,8 @@
*/
#define MC_CMD_SCHEDINFO 0x3e
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SCHEDINFO_IN msgrequest */
#define MC_CMD_SCHEDINFO_IN_LEN 0
@@ -3593,6 +3869,68 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
/* enum: Hotpoint temperature: degC */
#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -3701,6 +4039,8 @@
#define MC_CMD_SENSOR_STATE_BROKEN 0x3
/* enum: Sensor is working but does not currently have a reading. */
#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
@@ -3870,6 +4210,7 @@
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
@@ -3877,11 +4218,38 @@
#define MC_CMD_WORKAROUND_BUG35388 0x2
/* enum: Bug35017 workaround (A64 tables must be identity map) */
#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
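A hedged sketch of enabling the bug 26807 workaround and checking whether other functions were FLRed in the process; mcdi_rpc() is a hypothetical request/response transport and the le32 helpers are the hypothetical ones sketched earlier:

#include <stddef.h>
#include <stdint.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen, size_t *outlen_actual); /* hypothetical */
void le32_put(uint8_t *p, uint32_t v);	/* hypothetical, see earlier sketch */
uint32_t le32_get(const uint8_t *p);	/* hypothetical, see earlier sketch */

/* Returns 0 on success, 1 if other functions were FLRed (their filters must
 * be re-installed), or a negative MCDI error.
 */
static int enable_mcast_filter_chaining(void)
{
	uint8_t in[MC_CMD_WORKAROUND_IN_LEN] = { 0 };
	uint8_t out[MC_CMD_WORKAROUND_EXT_OUT_LEN];
	size_t outlen = 0;
	int rc;

	le32_put(in + MC_CMD_WORKAROUND_IN_TYPE_OFST, MC_CMD_WORKAROUND_BUG26807);
	le32_put(in + MC_CMD_WORKAROUND_IN_ENABLED_OFST, 1);
	rc = mcdi_rpc(MC_CMD_WORKAROUND, in, sizeof(in), out, sizeof(out), &outlen);
	if (rc)
		return rc;	/* e.g. MC_CMD_ERR_FILTERS_PRESENT without admin rights */
	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN &&
	    (le32_get(out + MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST) &
	     (1u << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)))
		return 1;
	return 0;
}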
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
@@ -4093,7 +4461,7 @@
/***********************************/
/* MC_CMD_GET_MAC_ADDRESSES
- * Returns the base MAC, count and stride for the requestiong function
+ * Returns the base MAC, count and stride for the requesting function
*/
#define MC_CMD_GET_MAC_ADDRESSES 0x55
@@ -4115,6 +4483,527 @@
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
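A sketch of unpacking one 32-bit sensor word from this response using the READING/STATE/TYPE fields above:

#include <stdint.h>

static void decode_mum_sensor(uint32_t word, uint16_t *reading,
			      uint8_t *state, uint8_t *type)
{
	*reading = word & 0xffff;	/* MC_CMD_MUM_OUT_READ_SENSORS_READING */
	*state = (word >> MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN) & 0xff;
	*type = (word >> MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN) & 0xff;
}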
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
/* MC_CMD_RESOURCE_SPECIFIER enum */
/* enum: Any */
#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -4203,6 +5092,30 @@
#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -4218,66 +5131,69 @@
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
/* enum: OpenOnload */
-#define LICENSED_APP_ID_ONLOAD 0x1
+#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
-#define LICENSED_APP_ID_PTP 0x2
+#define LICENSED_APP_ID_PTP 0x2
/* enum: SolarCapture Pro */
-#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-
-/***********************************/
-/* MC_CMD_GET_WORKAROUNDS
- * Read the list of all implemented and all currently enabled workarounds. The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
- */
-#define MC_CMD_GET_WORKAROUNDS 0x59
-
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
- */
-#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a
+ * TX timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
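A sketch of pulling the fields of this structure out of a raw 6-byte buffer, assuming the usual MCDI little-endian layout:

#include <stdint.h>

struct tx_timestamp_event {
	uint16_t tstamp_lo;	/* TSTAMP_DATA_LO, bytes 0-1 */
	uint8_t tx_ev_type;	/* TX_EV_TYPE, byte 3 */
	uint16_t tstamp_hi;	/* TSTAMP_DATA_HI, bytes 4-5 */
};

static struct tx_timestamp_event
parse_tx_timestamp(const uint8_t raw[TX_TIMESTAMP_EVENT_LEN])
{
	struct tx_timestamp_event ev = {
		.tstamp_lo = (uint16_t)(raw[0] | (raw[1] << 8)),
		.tx_ev_type = raw[TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST],
		.tstamp_hi = (uint16_t)(raw[4] | (raw[5] << 8)),
	};
	return ev;
}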
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
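For example, a selector hashing on the full 4-tuple (value 0xf) can be composed from the bit definitions above:

#include <stdint.h>

static uint8_t rss_mode_4tuple(void)
{
	return (1u << RSS_MODE_HASH_SRC_ADDR_LBN) |
	       (1u << RSS_MODE_HASH_DST_ADDR_LBN) |
	       (1u << RSS_MODE_HASH_SRC_PORT_LBN) |
	       (1u << RSS_MODE_HASH_DST_PORT_LBN);
}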
/***********************************/
@@ -4413,7 +5329,9 @@
#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_INIT_RXQ_IN msgrequest */
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
@@ -4456,9 +5374,73 @@
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
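A sketch of composing the FLAGS word for a packed-stream queue with 64 KiB buffers, using the LBN/WIDTH pairs above:

#include <stdint.h>

static uint32_t rxq_ext_flags_packed_stream_64k(void)
{
	return (MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM
			<< MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN) |
	       (MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K
			<< MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN);
}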
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
/***********************************/
/* MC_CMD_INIT_TXQ
@@ -4467,7 +5449,9 @@
#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_INIT_TXQ_IN msgrequest */
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
@@ -4499,6 +5483,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4511,6 +5499,60 @@
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4k-aligned, 4k-sized host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
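
/*
 * Editor's example (sketch, not part of this patch): an _LBN/_WIDTH pair
 * addresses a bit-field inside a little-endian 32-bit dword. The helper
 * below read-modify-writes such a field; get_le32()/put_le32() are
 * hypothetical, and the INIT_TXQ_EXT macros are assumed to be in scope.
 */
#include <stdint.h>

static uint32_t get_le32(const uint8_t *b, unsigned int o)
{
	return (uint32_t)b[o] | ((uint32_t)b[o + 1] << 8) |
	       ((uint32_t)b[o + 2] << 16) | ((uint32_t)b[o + 3] << 24);
}

static void put_le32(uint8_t *b, unsigned int o, uint32_t v)
{
	b[o] = v & 0xff;
	b[o + 1] = (v >> 8) & 0xff;
	b[o + 2] = (v >> 16) & 0xff;
	b[o + 3] = v >> 24;
}

static void set_dword_field(uint8_t *buf, unsigned int dword_ofst,
			    unsigned int lbn, unsigned int width,
			    uint32_t value)
{
	uint32_t mask = (width < 32 ? (1u << width) - 1 : 0xffffffffu) << lbn;
	uint32_t dword = get_le32(buf, dword_ofst);

	put_le32(buf, dword_ofst, (dword & ~mask) | ((value << lbn) & mask));
}

/* e.g. request inner (encapsulated) TCP checksum offload on a TX queue: */
static void init_txq_ext_enable_inner_tcp_csum(uint8_t *req)
{
	set_dword_field(req, MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST,
			MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN,
			MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH, 1);
}
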
/* MC_CMD_INIT_TXQ_OUT msgresponse */
#define MC_CMD_INIT_TXQ_OUT_LEN 0
@@ -4617,6 +5659,132 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
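
/*
 * Editor's example (sketch, not part of this patch): decoding one
 * MC_PROXY_STATUS_BUFFER entry. In structuredefs the _LBN values count bits
 * from the start of the whole structure, so the sub-dword fields here are
 * simply read at their byte _OFSTs. The struct and the get_le16()/get_le32()
 * helpers are illustrative, not driver types.
 */
#include <stdint.h>

struct proxy_status {
	uint32_t handle;
	uint16_t pf, vf, rid, status;
	uint32_t granted_privileges;
};

static uint16_t get_le16(const uint8_t *b, unsigned int o)
{
	return (uint16_t)(b[o] | (b[o + 1] << 8));
}

static uint32_t get_le32(const uint8_t *b, unsigned int o)
{
	return get_le16(b, o) | ((uint32_t)get_le16(b, o + 2) << 16);
}

static void proxy_status_decode(const uint8_t *buf, struct proxy_status *st)
{
	st->handle = get_le32(buf, MC_PROXY_STATUS_BUFFER_HANDLE_OFST);
	st->pf = get_le16(buf, MC_PROXY_STATUS_BUFFER_PF_OFST);
	st->vf = get_le16(buf, MC_PROXY_STATUS_BUFFER_VF_OFST); /* VF_NULL if PF */
	st->rid = get_le16(buf, MC_PROXY_STATUS_BUFFER_RID_OFST);
	st->status = get_le16(buf, MC_PROXY_STATUS_BUFFER_STATUS_OFST);
	st->granted_privileges =
		get_le32(buf, MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST);
}
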
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
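
/*
 * Editor's example (sketch, not part of this patch): the sizing rules stated
 * above. Each of the three buffers holds NUM_BLOCKS blocks; the status and
 * request block sizes must be powers of two, and the reply block size must
 * be a power of two or zero when no reply buffer is provided. Purely
 * illustrative arithmetic.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool is_pow2(uint32_t x)
{
	return x && !(x & (x - 1));
}

static bool proxy_block_sizes_valid(uint32_t status_sz, uint32_t request_sz,
				    uint32_t reply_sz)
{
	return is_pow2(status_sz) && is_pow2(request_sz) &&
	       (reply_sz == 0 || is_pow2(reply_sz));
}

/* Bytes to allocate for one of the buffers. */
static size_t proxy_buffer_bytes(uint32_t block_size, uint32_t num_blocks)
{
	return (size_t)block_size * num_blocks;
}
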
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
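
/*
 * Editor's example (sketch, not part of this patch): filling the 12-byte
 * PROXY_COMPLETE request, here authorizing a pending operation so the
 * originating function may retry it. put_le32() is the same hypothetical
 * little-endian writer as in the earlier sketches.
 */
#include <stdint.h>

static void put_le32(uint8_t *b, unsigned int o, uint32_t v)
{
	b[o] = v & 0xff;
	b[o + 1] = (v >> 8) & 0xff;
	b[o + 2] = (v >> 16) & 0xff;
	b[o + 3] = v >> 24;
}

static void proxy_complete_authorize(uint8_t req[MC_CMD_PROXY_COMPLETE_IN_LEN],
				     uint32_t block_index, uint32_t handle)
{
	put_le32(req, MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST, block_index);
	put_le32(req, MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST,
		 MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED);
	put_le32(req, MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST, handle);
}
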
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4688,6 +5856,44 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+/* PORT_CONFIG_ENTRY structuredef */
+#define PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define PORT_CONFIG_ENTRY_CORE_OFST 1
+#define PORT_CONFIG_ENTRY_CORE_LEN 1
+#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
+#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
+#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
+#define PORT_CONFIG_ENTRY_CORE_LBN 8
+#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define PORT_CONFIG_ENTRY_LANES_OFST 4
+#define PORT_CONFIG_ENTRY_LANES_LBN 32
+#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
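
/*
 * Editor's example (sketch, not part of this patch): walking an array of
 * 16-byte PORT_CONFIG_ENTRY structures in a response payload. Single-byte
 * fields are read directly at their _OFSTs; get_le32() is a hypothetical
 * little-endian reader.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *b, unsigned int o)
{
	return (uint32_t)b[o] | ((uint32_t)b[o + 1] << 8) |
	       ((uint32_t)b[o + 2] << 16) | ((uint32_t)b[o + 3] << 24);
}

static void dump_port_config(const uint8_t *buf, unsigned int num_entries)
{
	for (unsigned int i = 0; i < num_entries; i++) {
		const uint8_t *e = buf + i * PORT_CONFIG_ENTRY_LEN;

		printf("port %u: core=%u int=%u lanes=%#lx caps=%#lx\n",
		       (unsigned int)e[PORT_CONFIG_ENTRY_EXT_NUMBER_OFST],
		       (unsigned int)e[PORT_CONFIG_ENTRY_CORE_OFST],
		       (unsigned int)e[PORT_CONFIG_ENTRY_INT_NUMBER_OFST],
		       (unsigned long)get_le32(e, PORT_CONFIG_ENTRY_LANES_OFST),
		       (unsigned long)get_le32(e, PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST));
	}
}
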
/***********************************/
/* MC_CMD_FILTER_OP
@@ -4759,9 +5965,9 @@
#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
-/* enum: loop back to port 0 TX MAC */
+/* enum: loop back to TXDP 0 */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
-/* enum: loop back to port 1 TX MAC */
+/* enum: loop back to TXDP 1 */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
@@ -4778,9 +5984,7 @@
#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
* RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
- * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
- * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
- * a valid handle.
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
/* transmit domain (reserved; set to 0) */
@@ -4835,6 +6039,235 @@
#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
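
/*
 * Editor's example (sketch, not part of this patch): asking an extended
 * filter to match on the outer VNI of VXLAN traffic. Two steps: set the
 * MATCH_VNI_OR_VSID bit in MATCH_FIELDS, then pack the 24-bit VNI value and
 * the 8-bit encapsulation type into the VNI_OR_VSID dword. The VNI is
 * treated as a host-order integer here for brevity; per the field comment
 * above, real code must present it as bytes in network order.
 * get_le32()/put_le32() are hypothetical helpers.
 */
#include <stdint.h>

static uint32_t get_le32(const uint8_t *b, unsigned int o)
{
	return (uint32_t)b[o] | ((uint32_t)b[o + 1] << 8) |
	       ((uint32_t)b[o + 2] << 16) | ((uint32_t)b[o + 3] << 24);
}

static void put_le32(uint8_t *b, unsigned int o, uint32_t v)
{
	b[o] = v & 0xff;
	b[o + 1] = (v >> 8) & 0xff;
	b[o + 2] = (v >> 16) & 0xff;
	b[o + 3] = v >> 24;
}

static void filter_op_ext_match_vxlan_vni(uint8_t *req, uint32_t vni)
{
	uint32_t match = get_le32(req, MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST);

	match |= 1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN;
	put_le32(req, MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST, match);

	put_le32(req, MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST,
		 (vni & 0xffffffu) |	/* VNI_VALUE, bits 0..23 */
		 ((uint32_t)MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN <<
		  MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN)); /* VNI_TYPE, bits 24..31 */
}
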
/* MC_CMD_FILTER_OP_OUT msgresponse */
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
@@ -4849,6 +6282,27 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
/***********************************/
@@ -4865,6 +6319,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
/* enum: read the list of supported RX filter matches */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -4884,6 +6342,17 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
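
/*
 * Editor's example (sketch, not part of this patch): testing the single
 * restriction bit defined above after an OP_GET_RESTRICTIONS query.
 * get_le32() is a hypothetical little-endian reader.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *b, unsigned int o)
{
	return (uint32_t)b[o] | ((uint32_t)b[o + 1] << 8) |
	       ((uint32_t)b[o + 2] << 16) | ((uint32_t)b[o + 3] << 24);
}

static bool dst_ip_filters_mcast_only(const uint8_t *resp)
{
	uint32_t flags = get_le32(resp,
		MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST);

	return (flags >>
		MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN) & 1;
}
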
/***********************************/
/* MC_CMD_PARSER_DISP_RW
@@ -4901,8 +6370,10 @@
#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine */
+/* enum: Lookup engine (with original metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
/* enum: read a word of DICPU DMEM or a LUE entry */
@@ -4919,6 +6390,8 @@
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
@@ -5019,7 +6492,9 @@
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
-/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
@@ -5028,6 +6503,17 @@
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
/***********************************/
/* MC_CMD_FREE_VIS
@@ -5114,13 +6600,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
/***********************************/
@@ -5575,6 +7063,7 @@
#define MC_CMD_GET_CAPABILITIES 0xbe
#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
@@ -5582,6 +7071,20 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
@@ -5600,8 +7103,14 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
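
/*
 * Editor's example (sketch, not part of this patch): capability bits such as
 * ADDITIONAL_RSS_MODES gate newer fields elsewhere in this header (see the
 * RSS_CONTEXT_SET_FLAGS notes below), so drivers typically test FLAGS1
 * before using them. get_le32() is a hypothetical little-endian reader.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *b, unsigned int o)
{
	return (uint32_t)b[o] | ((uint32_t)b[o + 1] << 8) |
	       ((uint32_t)b[o + 2] << 16) | ((uint32_t)b[o + 3] << 24);
}

static bool nic_has_additional_rss_modes(const uint8_t *caps_resp)
{
	uint32_t flags1 = get_le32(caps_resp,
				   MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST);

	return (flags1 >>
		MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN) & 1;
}
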
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5609,6 +7118,10 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
@@ -5632,6 +7145,10 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
@@ -5642,22 +7159,69 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
/* Licensed capabilities */
@@ -5735,6 +7299,15 @@
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -5753,8 +7326,14 @@
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq is placed in when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
/* an already reserved bucket (typically set to bucket associated with outer
@@ -5768,6 +7347,35 @@
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is placed in when it is inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -5826,13 +7434,23 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if the existing configuration means we can't support
+ * attached v-ports with this number of tags.
+ */
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
@@ -5892,7 +7510,10 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. */
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
@@ -6136,8 +7757,13 @@
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
-/* The handle of the new RSS context */
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
/***********************************/
@@ -6249,7 +7875,11 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags */
+/* Hash control flags. The _EN bits are always supported. The _MODE bits only
+ * work when the firmware reports ADDITIONAL_RSS_MODES in
+ * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
+ * See the RSS_MODE structure for the meaning of the mode bits.
+ */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6259,6 +7889,20 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
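
/*
 * Editor's example (sketch, not part of this patch): programming one of the
 * 4-bit _MODE fields described above. The encoding of the mode nibble comes
 * from the RSS_MODE structuredef elsewhere in this header (not shown), so
 * the value is passed through opaquely; these fields are only honoured when
 * the firmware reports ADDITIONAL_RSS_MODES. get_le32()/put_le32() are
 * hypothetical helpers.
 */
#include <stdint.h>

static uint32_t get_le32(const uint8_t *b, unsigned int o)
{
	return (uint32_t)b[o] | ((uint32_t)b[o + 1] << 8) |
	       ((uint32_t)b[o + 2] << 16) | ((uint32_t)b[o + 3] << 24);
}

static void put_le32(uint8_t *b, unsigned int o, uint32_t v)
{
	b[o] = v & 0xff;
	b[o + 1] = (v >> 8) & 0xff;
	b[o + 2] = (v >> 16) & 0xff;
	b[o + 3] = v >> 24;
}

static void rss_set_tcp_ipv4_mode(uint8_t *req, uint32_t mode)
{
	unsigned int lbn = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
	uint32_t mask = 0xfu << lbn;
	uint32_t flags = get_le32(req, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST);

	flags = (flags & ~mask) | ((mode << lbn) & mask);
	put_le32(req, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST, flags);
}
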
/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
@@ -6279,7 +7923,12 @@
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags */
+/* Hash control flags. If any _MODE bits are non-zero (which will only be true
+ * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
+ * disregarded (but are guaranteed to be consistent with the _MODE bits if
+ * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
+ * allocated).
+ */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6289,6 +7938,20 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
/***********************************/
@@ -6311,8 +7974,13 @@
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping */
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
/***********************************/
@@ -6421,375 +8089,6 @@
/***********************************/
-/* MC_CMD_RMON_RX_CLASS_STATS
- * Retrieve rmon rx class statistics
- */
-#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
-
-/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_CLASS_STATS
- * Retrieve rmon tx class statistics
- */
-#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
-
-/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
- * Retrieve rmon rx super_class statistics
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
- * Retrieve rmon tx super_class statistics
- */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_CLASS
- * Allocate an rmon class
- */
-#define MC_CMD_RMON_ALLOC_CLASS 0xca
-
-/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
-#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
-/* class */
-#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_CLASS
- * Deallocate an rmon class
- */
-#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
-
-/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
-#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
-/* class */
-#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS
- * Allocate an rmon super_class
- */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
-/* super_class */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
- * Deallocate an rmon tx super_class
- */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
-/* super_class */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_UP_CONV_STATS
- * Retrieve up converter statistics
- */
-#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPI_STATS
- * Retrieve rx ipi stats
- */
-#define MC_CMD_RMON_RX_IPI_STATS 0xcf
-
-/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
-#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
-#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
- * Retrieve rx ipsec cntxt_ptr indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
- * Retrieve rx ipsec port indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -6877,7 +8176,7 @@
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
@@ -6921,354 +8220,6 @@
/***********************************/
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
- * Retrieve rx class drop stats
- */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
- * Retrieve rx super class drop stats
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPI_STATS
- * Retrieve tx ipi stats
- */
-#define MC_CMD_RMON_TX_IPI_STATS 0xd7
-
-/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
-#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
-#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
- * Retrieve tx ipsec counters by cntxt_ptr
- */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
- * Retrieve tx ipsec counters by port
- */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_STATS
- * Retrieve tx nowhere stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
- * Retrieve tx nowhere qbb stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
-/* class */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
-/* super_class */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
* Return the system and PDCPU clock frequencies.
*/
@@ -7296,22 +8247,66 @@
#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 12
-/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 12
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
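
The reworked SET_CLOCK message is symmetric: one dword per clock domain in
each direction, with 0 doing double duty as the DONT_CHANGE sentinel on input
and the DOMAIN_UNSUPPORTED indication on output. A minimal sketch of a caller
that changes only the DPCPU domain, assuming the sfc driver's usual MCDI
helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc from
mcdi.h); the function name is hypothetical, not part of this patch:

static int efx_set_dpcpu_freq(struct efx_nic *efx, u32 freq_mhz)
{
	/* Illustrative sketch only; assumes the driver's MCDI helpers */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLOCK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_CLOCK_OUT_LEN);
	size_t outlen;
	int rc;

	/* 0 is the DONT_CHANGE sentinel for every request field */
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_SYS_FREQ,
		       MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_ICORE_FREQ,
		       MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_DPCPU_FREQ, freq_mhz);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_PCS_FREQ,
		       MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_MC_FREQ,
		       MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_RMON_FREQ,
		       MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_VSWITCH_FREQ,
		       MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_CLOCK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* 0 on output means the domain doesn't exist / isn't controlled */
	if (MCDI_DWORD(outbuf, SET_CLOCK_OUT_DPCPU_FREQ) ==
	    MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED)
		return -EOPNOTSUPP;
	return 0;
}
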
/***********************************/
@@ -7325,12 +8320,22 @@
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-/* enum: RxDPCPU */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
/* enum: TxDPCPU1 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
* initialised to zero
*/
@@ -7418,6 +8423,25 @@
/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_CAP_BLK_READ
* Read multiple 64bit words from capture block memory
*/
@@ -7730,6 +8754,8 @@
* more data is returned.
*/
#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7762,20 +8788,32 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
+/* enum: Attenuation (0-15, TBD for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
+/* enum: CTLE Boost (0-15, TBD for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
+ * for Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (TBD for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -7865,6 +8903,8 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
/* enum: TX Slew Rate Fine control */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -7955,6 +8995,20 @@
#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -8224,6 +9278,8 @@
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
/* enum: validate application */
#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
/* arguments specific to this particular operation */
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -8258,10 +9314,22 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
/***********************************/
/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure port sniffing for the physical port associated with the calling
+ * Configure RX port sniffing for the physical port associated with the calling
* function. Only a privileged function may change the port sniffing
* configuration. A copy of all traffic delivered to the host (non-promiscuous
* mode) or all traffic arriving at the port (promiscuous mode) may be
@@ -8299,7 +9367,7 @@
/***********************************/
/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current port sniffing configuration for the physical port
+ * Obtain the current RX port sniffing configuration for the physical port
* associated with the calling function. Only a privileged function may read
* the configuration.
*/
@@ -8330,4 +9398,673 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
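
The TYPE/ENTITY/VALUE layout is deliberately generic: TYPE selects the
setting, ENTITY is interpreted per TYPE (a queue handle here, an EVB_PORT_ID
for the v-adaptor setting), and VALUE is a variable-length array whose shape
also depends on TYPE; both TYPEs defined so far take a single boolean dword.
A hedged sketch using the same assumed MCDI helpers, with a hypothetical
function name:

static int efx_txq_set_mcast_udp_lookup(struct efx_nic *efx,
					u32 txq_handle, bool enable)
{
	/* Illustrative sketch only, not part of this patch */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));

	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE,
		       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
	/* ENTITY is the queue handle for this TYPE */
	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, txq_handle);
	/* VALUE is a single boolean dword for this TYPE */
	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, enable ? 1 : 0);

	return efx_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}
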
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
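
A hedged sketch of switching on TX sniffing in SIMPLE mode, again assuming
the driver's MCDI helpers (here also MCDI_POPULATE_DWORD_1 for the bitfield
FLAGS word); the function name is hypothetical. As the comment above warns,
the receiving queue gets transmit timestamps in its packet prefix, so it
should not be shared with ordinary receive traffic:

static int efx_enable_tx_sniff(struct efx_nic *efx, u32 rx_queue)
{
	/* Illustrative sketch only, not part of this patch */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN);

	MCDI_POPULATE_DWORD_1(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS,
			      SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE, 1);
	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rx_queue);
	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE,
		       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	/* 0xFFFFFFFF is never a valid RSS context handle, so it is a
	 * safe filler when RX_MODE_RSS is not in use.
	 */
	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT,
		       0xFFFFFFFF);

	return efx_mcdi_rpc(efx, MC_CMD_SET_TX_PORT_SNIFF_CONFIG,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}
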
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
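
IMPLEMENTED and ENABLED are plain dword bitmasks, so a caller ANDs both with
the enum bit of interest. A sketch of the query whose result the new
workaround_26807 flag (added to efx_ef10_nic_data later in this patch) would
naturally cache; the function name is hypothetical and the usual MCDI helpers
are assumed:

static int efx_check_bug26807(struct efx_nic *efx, bool *enabled)
{
	/* Illustrative sketch only, not part of this patch */
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	/* No request payload is defined for this command */
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
		return -EIO;

	/* Workaround counts only if implemented AND currently enabled */
	*enabled = !!(MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED) &
		      MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED) &
		      MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
	return 0;
}
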
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000,
+ * VF 3 of PF 1 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
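
Read and write share one message: leaving the MSB (DO_CHANGE) of NEW_MASK
clear turns the command into a pure read, so a read-modify-write takes two
RPCs. A sketch granting the MAC-spoofing privilege to an arbitrary VF, with
FUNCTION packed as PF in bits 0-15 and VF in bits 16-31; the function name is
hypothetical and the MCDI helpers (including MCDI_POPULATE_DWORD_2) are
assumed:

static int efx_grant_mac_spoofing(struct efx_nic *efx, u16 pf, u16 vf)
{
	/* Illustrative sketch only, not part of this patch */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
	size_t outlen;
	u32 mask;
	int rc;

	/* First RPC: DO_CHANGE clear, so this only reads the old mask */
	MCDI_POPULATE_DWORD_2(inbuf, PRIVILEGE_MASK_IN_FUNCTION,
			      PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
			      PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	/* Second RPC: write back with the new bit and DO_CHANGE set */
	mask = MCDI_DWORD(outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
	mask |= MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING;
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK,
		       mask | MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE);
	return efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, inbuf, sizeof(inbuf),
			    outbuf, sizeof(outbuf), &outlen);
}
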
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 3 of PF 1 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space is available). If 0, only one write attempt
+ * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
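
A sketch of writing a 128-bit crypto-key sector with up to three fallback
retries, then reading back the index of the sector the firmware actually
used; the function name is hypothetical and the MCDI helpers (including
MCDI_PTR) are assumed:

static int efx_xpm_write_key128(struct efx_nic *efx, const u8 *key,
				u32 *index_out)
{
	/* Illustrative sketch only, not part of this patch */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_WRITE_SECTOR_IN_LEN(16));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_WRITE_SECTOR_OUT_LEN);
	size_t outlen;
	int rc;

	/* One dword write covers RETRIES and zeroes the reserved bytes */
	MCDI_SET_DWORD(inbuf, XPM_WRITE_SECTOR_IN_RETRIES, 3);
	/* Sector TYPE enums are shared with MC_CMD_XPM_READ_SECTOR_OUT */
	MCDI_SET_DWORD(inbuf, XPM_WRITE_SECTOR_IN_TYPE,
		       MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128);
	MCDI_SET_DWORD(inbuf, XPM_WRITE_SECTOR_IN_SIZE, 16);
	memcpy(MCDI_PTR(inbuf, XPM_WRITE_SECTOR_IN_DATA), key, 16);

	rc = efx_mcdi_rpc(efx, MC_CMD_XPM_WRITE_SECTOR,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		return rc;
	/* May differ from the requested sector if retries were needed */
	*index_out = MCDI_DWORD(outbuf, XPM_WRITE_SECTOR_OUT_INDEX);
	return 0;
}
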
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be fewer than BAD_COUNT, if they cannot all
+ * fit into the MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none are left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 47d1e3a96522..4d35313a239d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -925,6 +925,7 @@ struct vfdi_status;
* @stats_lock: Statistics update lock. Must be held when calling
* efx_nic_type::{update,start,stop}_stats.
* @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ * @mc_promisc: Whether in multicast promiscuous mode when last changed
*
* This is stored in the private area of the &struct net_device.
*/
@@ -1072,6 +1073,7 @@ struct efx_nic {
int last_irq_cpu;
spinlock_t stats_lock;
atomic_t n_rx_noskb_drops;
+ bool mc_promisc;
};
static inline int efx_dev_registered(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 31ff9084d9a4..0b536e27d3b2 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -506,6 +506,7 @@ enum {
* @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
@@ -535,6 +536,7 @@ struct efx_ef10_nic_data {
bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
+ bool workaround_26807;
bool must_check_datapath_caps;
u32 datapath_caps;
unsigned int rx_dpcpu_fw_id;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index b605dfd5c7bc..9d78830da609 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
if (efx->type->test_nvram) {
rc = efx->type->test_nvram(efx);
- tests->nvram = rc ? -1 : 1;
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ tests->nvram = rc ? -1 : 1;
}
return rc;
@@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ netif_info(efx, drv, efx->net_dev,
+ "%s phy selftest\n", rc ? "Failed" : "Passed");
+
return rc;
}
@@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
wmb();
kfree(state);
+ if (rc == -EPERM)
+ rc = 0;
+
return rc;
}
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index b323b9167526..b2f886d90429 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1042,9 +1042,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index e817a1a44379..b1e5f24708c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -16,6 +16,46 @@
#include "stmmac.h"
#include "stmmac_platform.h"
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ if (pdev->dev.of_node) {
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+ } else {
+ plat_dat = dev_get_platdata(&pdev->dev);
+ if (!plat_dat) {
+ dev_err(&pdev->dev, "no platform data provided\n");
+ return -EINVAL;
+ }
+
+ /* Set default value for multicast hash bins */
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat_dat->unicast_filter_entries = 1;
+ }
+
+ /* Custom initialisation (if needed) */
+ if (plat_dat->init) {
+ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@ static const struct of_device_id dwmac_generic_match[] = {
MODULE_DEVICE_TABLE(of, dwmac_generic_match);
static struct platform_driver dwmac_generic_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = dwmac_generic_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index f0e4bb4e3ec5..9d89bdbf029f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -248,23 +248,40 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return NULL;
}
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
struct ipq806x_gmac *gmac;
int val;
void *err;
+ val = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (val)
+ return val;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
gmac->pdev = pdev;
err = ipq806x_gmac_of_parse(gmac);
- if (err) {
+ if (IS_ERR(err)) {
dev_err(dev, "device tree parsing error\n");
- return err;
+ return PTR_ERR(err);
}
regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,7 +302,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return NULL;
+ return -EINVAL;
}
regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
@@ -304,7 +321,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return NULL;
+ return -EINVAL;
}
regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
@@ -327,30 +344,21 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
}
- return gmac;
-}
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
- struct ipq806x_gmac *gmac = priv;
-
- ipq806x_gmac_set_speed(gmac, speed);
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data ipq806x_gmac_data = {
- .has_gmac = 1,
- .setup = ipq806x_gmac_setup,
- .fix_mac_speed = ipq806x_gmac_fix_mac_speed,
-};
-
static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
- { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+ { .compatible = "qcom,ipq806x-gmac" },
{ }
};
MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
static struct platform_driver ipq806x_gmac_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = ipq806x_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "ipq806x-gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index cb888d3ebbdc..78e9d1861896 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -25,66 +25,53 @@
# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
-struct lpc18xx_dwmac_priv_data {
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct regmap *reg;
- int interface;
-};
+ u8 ethmode;
+ int ret;
-static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
-{
- struct lpc18xx_dwmac_priv_data *dwmac;
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
- dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
- dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
- if (dwmac->interface < 0)
- return ERR_PTR(dwmac->interface);
+ plat_dat->has_gmac = true;
- dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
- if (IS_ERR(dwmac->reg)) {
- dev_err(&pdev->dev, "Syscon lookup failed\n");
- return dwmac->reg;
+ reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(reg);
}
- return dwmac;
-}
-
-static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
-{
- struct lpc18xx_dwmac_priv_data *dwmac = priv;
- u8 ethmode;
-
- if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
return -EINVAL;
}
- regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+ regmap_update_bits(reg, LPC18XX_CREG_CREG6,
LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data lpc18xx_dwmac_data = {
- .has_gmac = 1,
- .setup = lpc18xx_dwmac_setup,
- .init = lpc18xx_dwmac_init,
-};
-
static const struct of_device_id lpc18xx_dwmac_match[] = {
- { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+ { .compatible = "nxp,lpc1850-dwmac" },
{ }
};
MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
static struct platform_driver lpc18xx_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = lpc18xx_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "lpc18xx-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 61a324a87d09..c1bac1912b37 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, dwmac->reg);
}
-static void *meson6_dwmac_setup(struct platform_device *pdev)
+static int meson6_dwmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct meson_dwmac *dwmac;
struct resource *res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dwmac->reg))
- return ERR_CAST(dwmac->reg);
+ return PTR_ERR(dwmac->reg);
- return dwmac;
-}
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
-static const struct stmmac_of_data meson6_dwmac_data = {
- .setup = meson6_dwmac_setup,
- .fix_mac_speed = meson6_dwmac_fix_mac_speed,
-};
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id meson6_dwmac_match[] = {
- { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+ { .compatible = "amlogic,meson6-dwmac" },
{ }
};
MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
static struct platform_driver meson6_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = meson6_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "meson6-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 00a1e1e09d4f..11baa4b19779 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,7 +46,7 @@ struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
struct regulator *regulator;
- struct rk_gmac_ops *ops;
+ const struct rk_gmac_ops *ops;
bool clk_enabled;
bool clock_input;
@@ -177,7 +177,7 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
}
}
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
.set_to_rgmii = rk3288_set_to_rgmii,
.set_to_rmii = rk3288_set_to_rmii,
.set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
}
}
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
.set_to_rgmii = rk3368_set_to_rgmii,
.set_to_rmii = rk3368_set_to_rmii,
.set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
}
static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
- struct rk_gmac_ops *ops)
+ const struct rk_gmac_ops *ops)
{
struct rk_priv_data *bsp_priv;
struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
return bsp_priv;
}
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
- return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
- return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
static int rk_gmac_init(struct platform_device *pdev, void *priv)
{
struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@ static void rk_fix_speed(void *priv, unsigned int speed)
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
-static const struct stmmac_of_data rk3288_gmac_data = {
- .has_gmac = 1,
- .fix_mac_speed = rk_fix_speed,
- .setup = rk3288_gmac_setup,
- .init = rk_gmac_init,
- .exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct rk_gmac_ops *data;
+ int ret;
-static const struct stmmac_of_data rk3368_gmac_data = {
- .has_gmac = 1,
- .fix_mac_speed = rk_fix_speed,
- .setup = rk3368_gmac_setup,
- .init = rk_gmac_init,
- .exit = rk_gmac_exit,
-};
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->has_gmac = true;
+ plat_dat->init = rk_gmac_init;
+ plat_dat->exit = rk_gmac_exit;
+ plat_dat->fix_mac_speed = rk_fix_speed;
+
+ plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
+
+ ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id rk_gmac_dwmac_match[] = {
- { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
- { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = rk_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "rk_gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 8141c5b844ae..401383b252a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -175,31 +175,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
return 0;
}
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
- struct socfpga_dwmac *dwmac;
-
- dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return ERR_PTR(-ENOMEM);
-
- ret = socfpga_dwmac_parse_data(dwmac, dev);
- if (ret) {
- dev_err(dev, "Unable to parse OF data\n");
- return ERR_PTR(ret);
- }
-
- ret = socfpga_dwmac_setup(dwmac);
- if (ret) {
- dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
- return ERR_PTR(ret);
- }
-
- return dwmac;
-}
-
static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct socfpga_dwmac *dwmac = priv;
@@ -257,21 +232,58 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-static const struct stmmac_of_data socfpga_gmac_data = {
- .setup = socfpga_dwmac_probe,
- .init = socfpga_dwmac_init,
- .exit = socfpga_dwmac_exit,
- .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct socfpga_dwmac *dwmac;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ ret = socfpga_dwmac_parse_data(dwmac, dev);
+ if (ret) {
+ dev_err(dev, "Unable to parse OF data\n");
+ return ret;
+ }
+
+ ret = socfpga_dwmac_setup(dwmac);
+ if (ret) {
+ dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+ return ret;
+ }
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = socfpga_dwmac_init;
+ plat_dat->exit = socfpga_dwmac_exit;
+ plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+ ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id socfpga_dwmac_match[] = {
- { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+ { .compatible = "altr,socfpga-stmmac" },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = socfpga_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a2e8111c5d14..7f6f4a4fcc70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include "stmmac_platform.h"
@@ -128,6 +129,11 @@ struct sti_dwmac {
struct device *dev;
struct regmap *regmap;
u32 speed;
+ void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+ void (*fix_retime_src)(void *priv, unsigned int speed);
};
static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@ static void stid127_fix_retime_src(void *priv, u32 spd)
regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
}
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct sti_dwmac *dwmac = priv;
struct regmap *regmap = dwmac->regmap;
int iface = dwmac->interface;
struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@ static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
-
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
- struct sti_dwmac *dwmac = priv;
- u32 spd = dwmac->speed;
-
- sti_dwmac_ctrl_init(dwmac);
-
- stih4xx_fix_retime_src(priv, spd);
-
- return 0;
-}
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
- struct sti_dwmac *dwmac = priv;
- u32 spd = dwmac->speed;
-
- sti_dwmac_ctrl_init(dwmac);
-
- stid127_fix_retime_src(priv, spd);
+ dwmac->fix_retime_src(priv, dwmac->speed);
return 0;
}
@@ -334,36 +321,58 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ const struct sti_dwmac_of_data *data;
+ struct stmmac_resources stmmac_res;
struct sti_dwmac *dwmac;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = sti_dwmac_parse_data(dwmac, pdev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- return ERR_PTR(ret);
+ return ret;
}
- return dwmac;
+ dwmac->fix_retime_src = data->fix_retime_src;
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
+ plat_dat->fix_mac_speed = data->fix_retime_src;
+
+ ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data stih4xx_dwmac_data = {
- .fix_mac_speed = stih4xx_fix_retime_src,
- .setup = sti_dwmac_setup,
- .init = stix4xx_init,
- .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+ .fix_retime_src = stih4xx_fix_retime_src,
};
-static const struct stmmac_of_data stid127_dwmac_data = {
- .fix_mac_speed = stid127_fix_retime_src,
- .setup = sti_dwmac_setup,
- .init = stid127_init,
- .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+ .fix_retime_src = stid127_fix_retime_src,
};
static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@ static const struct of_device_id sti_dwmac_match[] = {
MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = sti_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "sti-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 15048ca39759..52b8ed9bd87c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -33,35 +33,6 @@ struct sunxi_priv_data {
struct regulator *regulator;
};
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
- struct sunxi_priv_data *gmac;
- struct device *dev = &pdev->dev;
-
- gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac)
- return ERR_PTR(-ENOMEM);
-
- gmac->interface = of_get_phy_mode(dev->of_node);
-
- gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
- if (IS_ERR(gmac->tx_clk)) {
- dev_err(dev, "could not get tx clock\n");
- return gmac->tx_clk;
- }
-
- /* Optional regulator for PHY */
- gmac->regulator = devm_regulator_get_optional(dev, "phy");
- if (IS_ERR(gmac->regulator)) {
- if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
- return ERR_PTR(-EPROBE_DEFER);
- dev_info(dev, "no regulator found\n");
- gmac->regulator = NULL;
- }
-
- return gmac;
-}
-
#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
#define SUN7I_GMAC_MII_RATE 25000000
@@ -132,25 +103,67 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
}
}
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
- .has_gmac = 1,
- .tx_coe = 1,
- .fix_mac_speed = sun7i_fix_speed,
- .setup = sun7i_gmac_setup,
- .init = sun7i_gmac_init,
- .exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->interface = of_get_phy_mode(dev->of_node);
+
+ gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "could not get tx clock\n");
+ return PTR_ERR(gmac->tx_clk);
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+	/* Platform data specifying hardware features and callbacks.
+	 * Hardware features were copied from Allwinner drivers. */
+ plat_dat->tx_coe = 1;
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->init = sun7i_gmac_init;
+ plat_dat->exit = sun7i_gmac_exit;
+ plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+ ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id sun7i_dwmac_match[] = {
- { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+ { .compatible = "allwinner,sun7i-a20-gmac" },
{ }
};
MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = sun7i_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index bcdc8955c719..d02691ba3d7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -104,32 +104,16 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
 * this function reads the driver parameters from the device tree and
 * sets some private fields that will be used by the main driver at runtime.
*/
-static int stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
struct device_node *np = pdev->dev.of_node;
+ struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
- const struct of_device_id *device;
- struct device *dev = &pdev->dev;
-
- device = of_match_device(dev->driver->of_match_table, dev);
- if (device->data) {
- const struct stmmac_of_data *data = device->data;
- plat->has_gmac = data->has_gmac;
- plat->enh_desc = data->enh_desc;
- plat->tx_coe = data->tx_coe;
- plat->rx_coe = data->rx_coe;
- plat->bugged_jumbo = data->bugged_jumbo;
- plat->pmt = data->pmt;
- plat->riwt_off = data->riwt_off;
- plat->fix_mac_speed = data->fix_mac_speed;
- plat->bus_setup = data->bus_setup;
- plat->setup = data->setup;
- plat->free = data->free;
- plat->init = data->init;
- plat->exit = data->exit;
- }
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
*mac = of_get_mac_address(np);
plat->interface = of_get_phy_mode(np);
@@ -151,7 +135,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
/* If phy-handle is not specified, check if we have a fixed-phy */
if (!plat->phy_node && of_phy_is_fixed_link(np)) {
if ((of_phy_register_fixed_link(np) < 0))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
plat->phy_node = of_node_get(np);
}
@@ -182,6 +166,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
*/
plat->maxmtu = JUMBO_LEN;
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -222,7 +212,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
GFP_KERNEL);
if (!dma_cfg) {
of_node_put(np);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
@@ -240,44 +230,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
- return 0;
+ return plat;
}
#else
-static int stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
}
#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
-/**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res)
{
- struct stmmac_resources stmmac_res;
- int ret = 0;
struct resource *res;
- struct device *dev = &pdev->dev;
- struct plat_stmmacenet_data *plat_dat = NULL;
- memset(&stmmac_res, 0, sizeof(stmmac_res));
+ memset(stmmac_res, 0, sizeof(*stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
*/
- stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
- if (stmmac_res.irq < 0) {
- if (stmmac_res.irq != -EPROBE_DEFER) {
- dev_err(dev,
+ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+ if (stmmac_res->irq < 0) {
+ if (stmmac_res->irq != -EPROBE_DEFER) {
+ dev_err(&pdev->dev,
"MAC IRQ configuration information not found\n");
}
- return stmmac_res.irq;
+ return stmmac_res->irq;
}
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -287,64 +267,23 @@ int stmmac_pltfr_probe(struct platform_device *pdev)
* In case the wake up interrupt is not passed from the platform
* so the driver will continue to use the mac irq (ndev->irq)
*/
- stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (stmmac_res.wol_irq < 0) {
- if (stmmac_res.wol_irq == -EPROBE_DEFER)
+ stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (stmmac_res->wol_irq < 0) {
+ if (stmmac_res->wol_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
- stmmac_res.wol_irq = stmmac_res.irq;
+ stmmac_res->wol_irq = stmmac_res->irq;
}
- stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (stmmac_res.lpi_irq == -EPROBE_DEFER)
+ stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (stmmac_res->lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res.addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(stmmac_res.addr))
- return PTR_ERR(stmmac_res.addr);
-
- plat_dat = dev_get_platdata(&pdev->dev);
-
- if (!plat_dat)
- plat_dat = devm_kzalloc(&pdev->dev,
- sizeof(struct plat_stmmacenet_data),
- GFP_KERNEL);
- if (!plat_dat) {
- pr_err("%s: ERROR: no memory", __func__);
- return -ENOMEM;
- }
-
- /* Set default value for multicast hash bins */
- plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat_dat->unicast_filter_entries = 1;
-
- if (pdev->dev.of_node) {
- ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
- if (ret) {
- pr_err("%s: main dt probe failed", __func__);
- return ret;
- }
- }
+ stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
- /* Custom setup (if needed) */
- if (plat_dat->setup) {
- plat_dat->bsp_priv = plat_dat->setup(pdev);
- if (IS_ERR(plat_dat->bsp_priv))
- return PTR_ERR(plat_dat->bsp_priv);
- }
-
- /* Custom initialisation (if needed)*/
- if (plat_dat->init) {
- ret = plat_dat->init(pdev, plat_dat->bsp_priv);
- if (unlikely(ret))
- return ret;
- }
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
/**
* stmmac_pltfr_remove
@@ -361,9 +300,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
- if (priv->plat->free)
- priv->plat->free(pdev, priv->plat->bsp_priv);
-
return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 71da86d7bd00..ffeb8d9e2b2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,7 +19,14 @@
#ifndef __STMMAC_PLATFORM_H__
#define __STMMAC_PLATFORM_H__
-int stmmac_pltfr_probe(struct platform_device *pdev);
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res);
+
int stmmac_pltfr_remove(struct platform_device *pdev);
extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
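Every glue driver converted above now follows the same probe sequence
against these two helpers. A condensed sketch (hypothetical "foo" glue
layer, not part of the patch):

	static int foo_dwmac_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources stmmac_res;
		int ret;

		/* 1. Collect the MAC IRQs and register space. */
		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
		if (ret)
			return ret;

		/* 2. Parse common DT properties into an allocated plat_dat. */
		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* 3. Fill in SoC-specific flags, callbacks and bsp_priv. */
		plat_dat->has_gmac = true;

		/* 4. Hand over to the stmmac core. */
		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	}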
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 000000000000..a8f315106742
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+ bool "Synopsys devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Synopsys devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+ tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
+ select PHYLIB
+ select CRC32
+ select MII
+ depends on OF
+ ---help---
+	  This driver supports the DWC Ethernet QoS IP from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
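As a usage note (not part of the patch), the driver is built once both
new symbols are enabled, e.g. in the kernel configuration:

	CONFIG_NET_VENDOR_SYNOPSYS=y
	CONFIG_SYNOPSYS_DWC_ETH_QOS=m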
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..7a375723fc18
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644
index 000000000000..85b3326775b8
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -0,0 +1,3019 @@
+/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
+ *
+ * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ * This version introduced a lot of changes which breaks backwards
+ * compatibility with the non-QoS IP from Synopsys (used in the ST Micro drivers).
+ * Some fields differ between version 4.00a and 4.10a, mainly the interrupt
+ * bit fields. The driver could be made compatible with 4.00a if all
+ * relevant HW errata are handled.
+ *
+ * The GMAC is highly configurable at synthesis time. This driver has been
+ * developed for a subset of the total available feature set. Currently
+ * it supports:
+ * - TSO
+ * - Checksum offload for RX and TX.
+ * - Energy-efficient Ethernet (EEE).
+ * - GMII PHY interface.
+ * - The statistics module.
+ * - Single RX and TX queue.
+ *
+ * Copyright (C) 2015 Axis Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME "dwceqos"
+#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION "0.9"
+
+#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN 8
+#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET 0x0000
+#define MAX_MAC_REG_OFFSET 0x0bd0
+#define START_MTL_REG_OFFSET 0x0c00
+#define MAX_MTL_REG_OFFSET 0x0d7c
+#define START_DMA_REG_OFFSET 0x1000
+#define MAX_DMA_REG_OFFSET 0x117C
+
+#define REG_SPACE_SIZE 0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE 0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004
+#define REG_DWCEQOS_DMA_IS 0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130
+#define REG_DWCEQOS_DMA_CH0_IE 0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUF   0x115c
+#define REG_DWCEQOS_DMA_CH0_STA 0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR BIT(11)
+#define DWCEQOS_DMA_MODE_DA BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+ (((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+ (((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+ (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1)
+
+#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS BIT(16)
+#define DWCEQOS_DMA_IS_MACIS BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE BIT(7)
+#define DWCEQOS_DMA_RDES3_ES BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT 0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT BIT(30)
+#define DWCEQOS_DMA_TDES3_FD BIT(29)
+#define DWCEQOS_DMA_TDES3_LD BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP BIT(17)
+#define DWCEQOS_DMA_TDES3_CA 0x00030000
+#define DWCEQOS_DMA_TDES3_TSE BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26)
+
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED 0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) (((status0) & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER 0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38
+
+#define REG_DWCEQOS_MTL_IS 0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512 0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32 BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG 0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG 0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT 0x0008
+#define REG_DWCEQOS_MAC_WD_TO 0x000c
+#define REG_DWCEQOS_HASTABLE_LO 0x0010
+#define REG_DWCEQOS_HASTABLE_HI 0x0014
+#define REG_DWCEQOS_MAC_IS 0x00b0
+#define REG_DWCEQOS_MAC_IE 0x00b4
+#define REG_DWCEQOS_MAC_STAT 0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070
+
+#define DWCEQOS_MAC_CFG_ACS BIT(20)
+#define DWCEQOS_MAC_CFG_JD BIT(17)
+#define DWCEQOS_MAC_CFG_JE BIT(16)
+#define DWCEQOS_MAC_CFG_PS BIT(15)
+#define DWCEQOS_MAC_CFG_FES BIT(14)
+#define DWCEQOS_MAC_CFG_DM BIT(13)
+#define DWCEQOS_MAC_CFG_DO BIT(10)
+#define DWCEQOS_MAC_CFG_TE BIT(1)
+#define DWCEQOS_MAC_CFG_IPC BIT(27)
+#define DWCEQOS_MAC_CFG_RE BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + ((reg) * 8))
+#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + ((reg) * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR BIT(0)
+
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x) & 15) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+ DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+ DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) (128 << (((x) & 0x7c0) >> 6))
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+ (1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR BIT(0)
+
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL 0x0700
+#define REG_DWCEQOS_MMC_RXIRQ 0x0704
+#define REG_DWCEQOS_MMC_TXIRQ 0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR 0x07F0
+#define DWC_MMC_TXLPIUSCNTR 0x07EC
+#define DWC_MMC_TXOVERSIZE_G 0x0778
+#define DWC_MMC_TXVLANPACKETS_G 0x0774
+#define DWC_MMC_TXPAUSEPACKETS 0x0770
+#define DWC_MMC_TXEXCESSDEF 0x076C
+#define DWC_MMC_TXPACKETCOUNT_G 0x0768
+#define DWC_MMC_TXOCTETCOUNT_G 0x0764
+#define DWC_MMC_TXCARRIERERROR 0x0760
+#define DWC_MMC_TXEXCESSCOL 0x075C
+#define DWC_MMC_TXLATECOL 0x0758
+#define DWC_MMC_TXDEFERRED 0x0754
+#define DWC_MMC_TXMULTICOL_G 0x0750
+#define DWC_MMC_TXSINGLECOL_G 0x074C
+#define DWC_MMC_TXUNDERFLOWERROR 0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB 0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB 0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB 0x0728
+#define DWC_MMC_TX64OCTETS_GB 0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB 0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB 0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR 0x07F8
+#define DWC_MMC_RXLPIUSCNTR 0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G 0x07E4
+#define DWC_MMC_RXRCVERROR 0x07E0
+#define DWC_MMC_RXWATCHDOG 0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB 0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW 0x07D4
+#define DWC_MMC_RXPAUSEPACKETS 0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
+#define DWC_MMC_RXLENGTHERROR 0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
+#define DWC_MMC_RX64OCTETS_GB 0x07AC
+#define DWC_MMC_RXOVERSIZE_G 0x07A8
+#define DWC_MMC_RXUNDERSIZE_G 0x07A4
+#define DWC_MMC_RXJABBERERROR 0x07A0
+#define DWC_MMC_RXRUNTERROR 0x079C
+#define DWC_MMC_RXALIGNMENTERROR 0x0798
+#define DWC_MMC_RXCRCERROR 0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
+#define DWC_MMC_RXOCTETCOUNT_G 0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* DMA ring descriptor: software bookkeeping entries that shadow the HW DMA descriptors. */
+struct ring_desc {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+ u32 des0;
+ u32 des1;
+ u32 des2;
+ u32 des3;
+} ____cacheline_aligned;
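
/* Illustrative sketch, not part of the patch: with the TDES bits defined
 * below, a single-buffer TX descriptor in read format would be armed
 * roughly as follows (hypothetical helper; len assumed below 16K).
 */
static inline void sketch_arm_tx_desc(struct dwceqos_dma_desc *dd,
				      dma_addr_t buf, size_t len)
{
	dd->des0 = lower_32_bits(buf);		/* buffer 1 address */
	dd->des1 = 0;
	dd->des2 = len | DWCEQOS_DMA_TDES2_IOC;	/* B1 length, IRQ on done */
	dd->des3 = DWCEQOS_DMA_TDES3_FD | DWCEQOS_DMA_TDES3_LD |
		   DWCEQOS_DMA_TDES3_OWN | len;	/* first+last, DMA-owned */
}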
+
+struct dwceqos_mmc_counters {
+ __u64 txlpitranscntr;
+ __u64 txpiuscntr;
+ __u64 txoversize_g;
+ __u64 txvlanpackets_g;
+ __u64 txpausepackets;
+ __u64 txexcessdef;
+ __u64 txpacketcount_g;
+ __u64 txoctetcount_g;
+ __u64 txcarriererror;
+ __u64 txexcesscol;
+ __u64 txlatecol;
+ __u64 txdeferred;
+ __u64 txmulticol_g;
+ __u64 txsinglecol_g;
+ __u64 txunderflowerror;
+ __u64 txbroadcastpackets_gb;
+ __u64 txmulticastpackets_gb;
+ __u64 txunicastpackets_gb;
+ __u64 tx1024tomaxoctets_gb;
+ __u64 tx512to1023octets_gb;
+ __u64 tx256to511octets_gb;
+ __u64 tx128to255octets_gb;
+ __u64 tx65to127octets_gb;
+ __u64 tx64octets_gb;
+ __u64 txmulticastpackets_g;
+ __u64 txbroadcastpackets_g;
+ __u64 txpacketcount_gb;
+ __u64 txoctetcount_gb;
+
+ __u64 rxlpitranscntr;
+ __u64 rxlpiuscntr;
+ __u64 rxctrlpackets_g;
+ __u64 rxrcverror;
+ __u64 rxwatchdog;
+ __u64 rxvlanpackets_gb;
+ __u64 rxfifooverflow;
+ __u64 rxpausepackets;
+ __u64 rxoutofrangetype;
+ __u64 rxlengtherror;
+ __u64 rxunicastpackets_g;
+ __u64 rx1024tomaxoctets_gb;
+ __u64 rx512to1023octets_gb;
+ __u64 rx256to511octets_gb;
+ __u64 rx128to255octets_gb;
+ __u64 rx65to127octets_gb;
+ __u64 rx64octets_gb;
+ __u64 rxoversize_g;
+ __u64 rxundersize_g;
+ __u64 rxjabbererror;
+ __u64 rxrunterror;
+ __u64 rxalignmenterror;
+ __u64 rxcrcerror;
+ __u64 rxmulticastpackets_g;
+ __u64 rxbroadcastpackets_g;
+ __u64 rxoctetcount_g;
+ __u64 rxoctetcount_gb;
+ __u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+ const char stat_name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define STAT_ITEM(name, var) \
+ {\
+ name,\
+ offsetof(struct dwceqos_mmc_counters, var),\
+ }
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+ STAT_ITEM("tx_bytes", txoctetcount_gb),
+ STAT_ITEM("tx_packets", txpacketcount_gb),
+ STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+ STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+ STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb),
+ STAT_ITEM("tx_pause_packets", txpausepackets),
+ STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+ STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+ STAT_ITEM("tx_underflow_errors", txunderflowerror),
+ STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+ STAT_ITEM("rx_bytes", rxoctetcount_gb),
+ STAT_ITEM("rx_packets", rxpacketcount_gb),
+ STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+ STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+ STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+ STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+ STAT_ITEM("rx_pause_packets", rxpausepackets),
+ STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+ STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+ STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+ STAT_ITEM("rx_oversize_packets", rxoversize_g),
+ STAT_ITEM("rx_undersize_packets", rxundersize_g),
+ STAT_ITEM("rx_jabbers", rxjabbererror),
+ STAT_ITEM("rx_align_errors", rxalignmenterror),
+ STAT_ITEM("rx_crc_errors", rxcrcerror),
+ STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
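
/* Sketch of the assumed consumer (not shown in this hunk): ethtool
 * statistics are produced by walking the table above and reading each
 * counter out of struct dwceqos_mmc_counters via its recorded offset.
 */
static void sketch_fill_ethtool_stats(const struct dwceqos_mmc_counters *mmc,
				      u64 *data)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i)
		data[i] = *(const u64 *)((const u8 *)mmc +
					 dwceqos_ethtool_stats[i].offset);
}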
+
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+ /* Enable AXI low-power interface. */
+ bool en_lpi;
+ /* Limit on number of outstanding AXI write requests. */
+ u32 write_requests;
+ /* Limit on number of outstanding AXI read requests. */
+ u32 read_requests;
+ /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+ u32 burst_map;
+	/* DMA programmable burst length */
+ u32 tx_pbl;
+ u32 rx_pbl;
+};
+
+struct dwceqos_flowcontrol {
+ int autoneg;
+ int rx;
+ int rx_current;
+ int tx;
+ int tx_current;
+};
+
+struct net_local {
+ void __iomem *baseaddr;
+ struct clk *phy_ref_clk;
+ struct clk *apb_pclk;
+
+ struct device_node *phy_node;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+
+ u32 msg_enable;
+
+ struct tasklet_struct tx_bdreclaim_tasklet;
+ struct workqueue_struct *txtimeout_handler_wq;
+ struct work_struct txtimeout_reinit;
+
+ phy_interface_t phy_interface;
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct napi_struct napi;
+
+ /* DMA Descriptor Areas */
+ struct ring_desc *rx_skb;
+ struct ring_desc *tx_skb;
+
+ struct dwceqos_dma_desc *tx_descs;
+ struct dwceqos_dma_desc *rx_descs;
+
+	/* DMA mapped descriptor areas */
+ dma_addr_t tx_descs_addr;
+ dma_addr_t rx_descs_addr;
+ dma_addr_t tx_descs_tail_addr;
+ dma_addr_t rx_descs_tail_addr;
+
+ size_t tx_free;
+ size_t tx_next;
+ size_t rx_cur;
+ size_t tx_cur;
+
+ /* Spinlocks for accessing DMA Descriptors */
+ spinlock_t tx_lock;
+
+ /* Spinlock for register read-modify-writes. */
+ spinlock_t hw_lock;
+
+ u32 feature0;
+ u32 feature1;
+ u32 feature2;
+
+ struct dwceqos_bus_cfg bus_cfg;
+ bool en_tx_lpi_clockgating;
+
+ int eee_enabled;
+ int eee_active;
+ int csr_val;
+ u32 gso_size;
+
+ struct dwceqos_mmc_counters mmc_counters;
+ /* Protect the mmc_counter updates. */
+ spinlock_t stats_lock;
+ u32 mmc_rx_counters_mask;
+ u32 mmc_tx_counters_mask;
+
+ struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+ u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+ unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
+#define dwceqos_read(lp, reg) \
+ readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val) \
+ writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.rx_current = 0;
+ lp->flowcontrol.tx_current = 0;
+ lp->eee_active = 0;
+ lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+ struct dwceqos_dma_desc *dd;
+
+ if (tx)
+ dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
+ else
+ dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
+
+ pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+ index, dd);
+ pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+ dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+ size_t desci, i;
+
+ pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+ lp->tx_cur, lp->tx_next);
+
+ print_descriptor(lp, lp->rx_cur, 0);
+
+ for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
+ i < DWCEQOS_TX_DCNT;
+ ++i) {
+ print_descriptor(lp, desci, 1);
+ desci = (desci + 1) % DWCEQOS_TX_DCNT;
+ }
+
+ pr_info("DMA_Debug_Status0: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+ pr_info("DMA_CH0_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+ pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+ dwceqos_read(lp, 0x1144));
+ pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+ dwceqos_read(lp, 0x1154));
+ pr_info("MTL_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+ pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+ pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+ pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+ dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+ int rate = clk_get_rate(lp->apb_pclk);
+
+ if (rate <= 20000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+ else if (rate <= 35000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+ else if (rate <= 60000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+ else if (rate <= 100000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+ else if (rate <= 150000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+ else if (rate <= 250000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int i;
+ int data;
+
+ regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+ DWCEQOS_MDIO_PHYREG(phyreg) |
+ DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+ DWCEQOS_MAC_MDIO_ADDR_GB |
+ DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+ for (i = 0; i < 5; ++i) {
+ usleep_range(64, 128);
+ if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+ DWCEQOS_MAC_MDIO_ADDR_GB))
+ break;
+ }
+
+ data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+ if (i == 5) {
+ netdev_warn(lp->ndev, "MDIO read timed out\n");
+ data = 0xffff;
+ }
+
+ return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+ u16 value)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int i;
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+ regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+ DWCEQOS_MDIO_PHYREG(phyreg) |
+ DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+ DWCEQOS_MAC_MDIO_ADDR_GB |
+ DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+ for (i = 0; i < 5; ++i) {
+ usleep_range(64, 128);
+ if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+ DWCEQOS_MAC_MDIO_ADDR_GB))
+ break;
+ }
+ if (i == 5)
+ netdev_warn(lp->ndev, "MDIO write timed out\n");
+ return 0;
+}
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(phydev, rq, cmd);
+ default:
+ dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+ return -EOPNOTSUPP;
+ }
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ /* Indicate link down to the LPI state machine */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ /* Indicate link up to the LPI state machine */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+ /* Check for changed EEE capability */
+ if (!lp->eee_active && lp->eee_enabled) {
+ lp->eee_enabled = 0;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ }
+}
+
+static void dwceqos_set_speed(struct net_local *lp)
+{
+ struct phy_device *phydev = lp->phy_dev;
+ u32 regval;
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+ DWCEQOS_MAC_CFG_DM);
+
+ if (phydev->duplex)
+ regval |= DWCEQOS_MAC_CFG_DM;
+ if (phydev->speed == SPEED_10) {
+ regval |= DWCEQOS_MAC_CFG_PS;
+ } else if (phydev->speed == SPEED_100) {
+ regval |= DWCEQOS_MAC_CFG_PS |
+ DWCEQOS_MAC_CFG_FES;
+ } else if (phydev->speed != SPEED_1000) {
+ netdev_err(lp->ndev,
+ "unknown PHY speed %d\n",
+ phydev->speed);
+ return;
+ }
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+ int status_change = 0;
+
+ if (phydev->link) {
+ if ((lp->speed != phydev->speed) ||
+ (lp->duplex != phydev->duplex)) {
+ dwceqos_set_speed(lp);
+
+ lp->speed = phydev->speed;
+ lp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+
+ if (lp->flowcontrol.autoneg) {
+ lp->flowcontrol.rx = phydev->pause ||
+ phydev->asym_pause;
+ lp->flowcontrol.tx = phydev->pause ||
+ phydev->asym_pause;
+ }
+
+ if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+ if (netif_msg_link(lp))
+ netdev_dbg(ndev, "set rx flow to %d\n",
+ lp->flowcontrol.rx);
+ dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+ lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+ }
+ if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+ if (netif_msg_link(lp))
+ netdev_dbg(ndev, "set tx flow to %d\n",
+ lp->flowcontrol.tx);
+ dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+ lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+ }
+ }
+
+ if (phydev->link != lp->link) {
+ lp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link) {
+ lp->ndev->trans_start = jiffies;
+ dwceqos_link_up(lp);
+ } else {
+ dwceqos_link_down(lp);
+ }
+ phy_print_status(phydev);
+ }
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
+
+ if (lp->phy_node) {
+ phydev = of_phy_connect(lp->ndev,
+ lp->phy_node,
+ &dwceqos_adjust_link,
+ 0,
+ lp->phy_interface);
+
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+ } else {
+ netdev_err(ndev, "no PHY configured\n");
+ return -ENODEV;
+ }
+
+ if (netif_msg_probe(lp))
+ netdev_dbg(lp->ndev,
+ "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n",
+ phydev, phydev->phy_id, phydev->addr);
+
+ phydev->supported &= PHY_GBIT_FEATURES;
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = DUPLEX_UNKNOWN;
+ lp->phy_dev = phydev;
+
+ if (netif_msg_probe(lp)) {
+ netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+ lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+ netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+ lp->phy_dev->drv->name);
+ }
+
+ return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+ struct sk_buff *new_skb;
+ dma_addr_t new_skb_baddr = 0;
+
+ new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+ if (!new_skb) {
+ netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+ goto err_out;
+ }
+
+ new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+ new_skb->data, DWCEQOS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+ netdev_err(lp->ndev, "DMA map error\n");
+ dev_kfree_skb(new_skb);
+ new_skb = NULL;
+ goto err_out;
+ }
+
+ lp->rx_descs[index].des0 = new_skb_baddr;
+ lp->rx_descs[index].des1 = 0;
+ lp->rx_descs[index].des2 = 0;
+ lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+ DWCEQOS_DMA_RDES3_BUF1V |
+ DWCEQOS_DMA_RDES3_OWN;
+
+ lp->rx_skb[index].mapping = new_skb_baddr;
+ lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+ lp->rx_skb[index].skb = new_skb;
+}
+
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+ int i;
+
+ if (lp->rx_skb) {
+ for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+ if (lp->rx_skb[i].skb) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[i].mapping,
+ lp->rx_skb[i].len,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb(lp->rx_skb[i].skb);
+ lp->rx_skb[i].skb = NULL;
+ lp->rx_skb[i].mapping = 0;
+ }
+ }
+ }
+
+ if (lp->tx_skb) {
+ for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+ if (lp->tx_skb[i].skb) {
+ dev_kfree_skb(lp->tx_skb[i].skb);
+ lp->tx_skb[i].skb = NULL;
+ }
+ if (lp->tx_skb[i].mapping) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+ lp->tx_skb[i].mapping = 0;
+ }
+ }
+ }
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+ int size;
+
+ dwceqos_clean_rings(lp);
+
+ kfree(lp->tx_skb);
+ lp->tx_skb = NULL;
+ kfree(lp->rx_skb);
+ lp->rx_skb = NULL;
+
+ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+ if (lp->rx_descs) {
+ dma_free_coherent(lp->ndev->dev.parent, size,
+ (void *)(lp->rx_descs), lp->rx_descs_addr);
+ lp->rx_descs = NULL;
+ }
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+ if (lp->tx_descs) {
+ dma_free_coherent(lp->ndev->dev.parent, size,
+ (void *)(lp->tx_descs), lp->tx_descs_addr);
+ lp->tx_descs = NULL;
+ }
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+ int size;
+ u32 i;
+
+ lp->gso_size = 0;
+
+ lp->tx_skb = NULL;
+ lp->rx_skb = NULL;
+ lp->rx_descs = NULL;
+ lp->tx_descs = NULL;
+
+ /* Reset the DMA indexes */
+ lp->rx_cur = 0;
+ lp->tx_cur = 0;
+ lp->tx_next = 0;
+ lp->tx_free = DWCEQOS_TX_DCNT;
+
+ /* Allocate Ring descriptors */
+ size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+ lp->rx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->rx_skb)
+ goto err_out;
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+ lp->tx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->tx_skb)
+ goto err_out;
+
+ /* Allocate DMA descriptors */
+ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+ lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+ &lp->rx_descs_addr, 0);
+ if (!lp->rx_descs)
+ goto err_out;
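+ /* The tail address points one descriptor past the end of the ring,
+ * so writing it to the tail-pointer register makes the whole ring
+ * available to the DMA.
+ */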
+ lp->rx_descs_tail_addr = lp->rx_descs_addr +
+ sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+ lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+ &lp->tx_descs_addr, 0);
+ if (!lp->tx_descs)
+ goto err_out;
+ lp->tx_descs_tail_addr = lp->tx_descs_addr +
+ sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
+
+ /* Initialize RX Ring Descriptors and buffers */
+ for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+ dwceqos_alloc_rxring_desc(lp, i);
+ if (!(lp->rx_skb[i].skb))
+ goto err_out;
+ }
+
+ /* Initialize TX Descriptors */
+ for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+ lp->tx_descs[i].des0 = 0;
+ lp->tx_descs[i].des1 = 0;
+ lp->tx_descs[i].des2 = 0;
+ lp->tx_descs[i].des3 = 0;
+ }
+
+ /* Make descriptor writes visible to the DMA. */
+ wmb();
+
+ return 0;
+
+err_out:
+ dwceqos_descriptor_free(lp);
+ return -ENOMEM;
+}
+
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+ return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+ lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+ lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+ lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval |= DWCEQOS_DMA_CH0_IE_TIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval |= DWCEQOS_DMA_CH0_IE_RIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+ dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+ dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+ int ret = -ENXIO, i;
+ struct resource res;
+ struct device_node *mdionode;
+
+ mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+ if (!mdionode)
+ return 0;
+
+ lp->mii_bus = mdiobus_alloc();
+ if (!lp->mii_bus) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ lp->mii_bus->name = "DWCEQOS MII bus";
+ lp->mii_bus->read = &dwceqos_mdio_read;
+ lp->mii_bus->write = &dwceqos_mdio_write;
+ lp->mii_bus->priv = lp;
+ lp->mii_bus->parent = &lp->ndev->dev;
+
+ lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!lp->mii_bus->irq) {
+ ret = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+ of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ if (of_mdiobus_register(lp->mii_bus, mdionode))
+ goto err_out_free_mdio_irq;
+
+ return 0;
+
+err_out_free_mdio_irq:
+ kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(lp->mii_bus);
+err_out:
+ of_node_put(mdionode);
+ return ret;
+}
+
+/* DMA reset. When issued, it also resets all MTL and MAC registers. */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+ /* Wait (at most) 0.5 seconds for the DMA reset */
+ int i = 5000;
+ u32 reg;
+
+ /* Force gigabit to guarantee a TX clock for GMII. */
+ reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+ reg |= DWCEQOS_MAC_CFG_DM;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+ do {
+ udelay(100);
+ i--;
+ reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+ } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+ /* We might experience a timeout if the chip clock mux is broken */
+ if (!i)
+ netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+ if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+ netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+ dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+ "read" : "write",
+ dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+ "descr" : "data",
+ dma_status);
+
+ print_status(lp);
+ }
+ if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+ netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+ dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+ "read" : "write",
+ dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+ "descr" : "data",
+ dma_status);
+
+ print_status(lp);
+ }
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+
+ /* A latched MMC interrupt cannot be masked; we must read
+ * all the counters while an interrupt is pending.
+ */
+ dwceqos_read_mmc_counters(lp,
+ dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+ dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+ u32 cause;
+
+ cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+ if (cause & DWCEQOS_MAC_IS_MMC_INT)
+ dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct net_local *lp = netdev_priv(ndev);
+
+ u32 cause;
+ u32 dma_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+ /* DMA Channel 0 Interrupt */
+ if (cause & DWCEQOS_DMA_IS_DC0IS) {
+ dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+ /* Transmit Interrupt */
+ if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+ tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+ dwceqos_dma_disable_txirq(lp);
+ }
+
+ /* Receive Interrupt */
+ if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+ /* Disable RX IRQs */
+ dwceqos_dma_disable_rxirq(lp);
+ napi_schedule(&lp->napi);
+ }
+
+ /* Fatal Bus Error interrupt */
+ if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+ dwceqos_fatal_bus_error(lp, dma_status);
+
+ /* errata 9000831707 */
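+ /* Ack the TEB/REB qualifier bits together with FBE
+ * even if they did not latch.
+ */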
+ dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+ DWCEQOS_DMA_CH0_IS_REB;
+ }
+
+ /* Ack all DMA Channel 0 IRQs */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+ ret = IRQ_HANDLED;
+ }
+
+ if (cause & DWCEQOS_DMA_IS_MTLIS) {
+ u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+ dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+ ret = IRQ_HANDLED;
+ }
+
+ if (cause & DWCEQOS_DMA_IS_MACIS) {
+ dwceqos_mac_interrupt(lp);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+ if (enable)
+ regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+ else
+ regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ /* MTL flow control */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+ if (enable)
+ regval |= DWCEQOS_MTL_RXQ_EHFC;
+ else
+ regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+ /* MAC flow control */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+ if (enable)
+ regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+ else
+ regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+ int RQS, RFD, RFA;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+ /* The queue size is in units of 256 bytes. We want 512-byte units for
+ * the threshold fields.
+ */
+ RQS = ((regval >> 20) & 0x3FF) + 1;
+ RQS /= 2;
+
+ /* The thresholds are relative to a full queue, with a bias
+ * of 1 KiByte below full.
+ */
+ RFD = RQS / 2 - 2;
+ RFA = RQS / 8 - 2;
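+ /* Example: an 8 KiB RX queue gives RQS = 16, RFD = 6 and RFA = 0;
+ * any smaller queue makes RFA negative and fails the check below.
+ */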
+
+ regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+ if (RFD >= 0 && RFA >= 0) {
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+ } else {
+ netdev_warn(lp->ndev,
+ "FIFO too small for flow control.");
+ }
+
+ regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+ DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+ unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
+ BUG_ON(!rate_mhz);
+
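+ /* Program the number of apb_pclk cycles per microsecond, minus one,
+ * so the MAC can derive the 1 us reference used by its LPI timers.
+ */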
+ dwceqos_write(lp,
+ REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+ DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+ u32 sysbus_reg;
+
+ /* N.B. We do not support the Fixed Burst mode because it
+ * opens a race window by making HW access to DMA descriptors
+ * non-atomic.
+ */
+
+ sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+ if (lp->bus_cfg.en_lpi)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+ if (lp->bus_cfg.burst_map)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+ lp->bus_cfg.burst_map);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+ DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
+ if (lp->bus_cfg.read_requests)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+ lp->bus_cfg.read_requests - 1);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+ DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+ if (lp->bus_cfg.write_requests)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+ lp->bus_cfg.write_requests - 1);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+ DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+ if (netif_msg_hw(lp))
+ netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
+static void dwceqos_init_hw(struct net_local *lp)
+{
+ u32 regval;
+ u32 buswidth;
+ u32 dma_skip;
+
+ /* Software reset */
+ dwceqos_reset_hw(lp);
+
+ dwceqos_configure_bus(lp);
+
+ /* Probe data bus width, 32/64/128 bits. */
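+ /* The low-order tail pointer bits are hard-wired to zero according
+ * to the bus width, so writing 0xF and reading back reveals the
+ * width in bytes.
+ */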
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+ buswidth = (regval ^ 0xF) + 1;
+
+ /* Cache-align dma descriptors. */
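+ /* The descriptor skip length (DSL) is counted in bus-width beats;
+ * it pads each 16-byte hardware descriptor out to its allocated
+ * stride.
+ */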
+ dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+ DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+ DWCEQOS_DMA_CH_CTRL_PBLX8);
+
+ /* Initialize DMA Channel 0 */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+ (u32)lp->tx_descs_addr);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+ (u32)lp->rx_descs_addr);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+ lp->tx_descs_tail_addr);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+ lp->rx_descs_tail_addr);
+
+ if (lp->bus_cfg.tx_pbl)
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+ else
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+ /* Enable TSO if the HW supports it */
+ if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+ regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+ if (lp->bus_cfg.rx_pbl)
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+ else
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+ regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+ regval |= DWCEQOS_DMA_CH_CTRL_START;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+ /* Initialize MTL Queues */
+ regval = DWCEQOS_MTL_SCHALG_STRICT;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+ regval = DWCEQOS_MTL_TXQ_SIZE(
+ DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+ DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+ DWCEQOS_MTL_TXQ_TTC512;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+ regval = DWCEQOS_MTL_RXQ_SIZE(
+ DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+ DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+ dwceqos_configure_flow_control(lp);
+
+ /* Initialize MAC */
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+ lp->eee_enabled = 0;
+
+ dwceqos_configure_clock(lp);
+
+ /* MMC counters */
+
+ /* probe implemented counters */
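+ /* Mask bits for unimplemented counters read back as zero, so
+ * writing all-ones reveals which counters exist in this
+ * configuration.
+ */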
+ dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+ dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+ lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+ lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+ dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+ DWCEQOS_MMC_CTRL_RSTONRD);
+ dwceqos_enable_mmc_interrupt(lp);
+
+ /* Enable Interrupts */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+ DWCEQOS_DMA_CH0_IE_NIE |
+ DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+ DWCEQOS_DMA_CH0_IE_AIE |
+ DWCEQOS_DMA_CH0_IE_FBEE);
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+ DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+ /* Start TX DMA */
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+ regval | DWCEQOS_DMA_CH_CTRL_START);
+
+ /* Enable MAC TX/RX */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+ regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct net_local *lp = netdev_priv(ndev);
+ unsigned int tx_bytes = 0;
+ unsigned int tx_packets = 0;
+
+ spin_lock(&lp->tx_lock);
+
+ while (lp->tx_free < DWCEQOS_TX_DCNT) {
+ struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+ struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+ /* Descriptor still held by the DMA? */
+ if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+ break;
+
+ if (rd->mapping)
+ dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(rd->skb)) {
+ ++tx_packets;
+ tx_bytes += rd->skb->len;
+ dev_consume_skb_any(rd->skb);
+ }
+
+ rd->skb = NULL;
+ rd->mapping = 0;
+ lp->tx_free++;
+ lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
+ if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+ (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+ if (netif_msg_tx_err(lp))
+ netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+ dd->des3);
+ if (netif_msg_hw(lp))
+ print_status(lp);
+ }
+ }
+ spin_unlock(&lp->tx_lock);
+
+ netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+ dwceqos_dma_enable_txirq(lp);
+ netif_wake_queue(ndev);
+}
+
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+ struct sk_buff *skb;
+ u32 tot_size = 0;
+ unsigned int n_packets = 0;
+ unsigned int n_descs = 0;
+ u32 len;
+
+ struct dwceqos_dma_desc *dd;
+ struct sk_buff *new_skb;
+ dma_addr_t new_skb_baddr = 0;
+
+ while (n_descs < budget) {
+ if (!dwceqos_packet_avail(lp))
+ break;
+
+ new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+ if (!new_skb) {
+ netdev_err(lp->ndev, "no memory for new sk_buff\n");
+ break;
+ }
+
+ /* Get dma handle of skb->data */
+ new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+ new_skb->data,
+ DWCEQOS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+ netdev_err(lp->ndev, "DMA map error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+
+ /* Read descriptor data after reading owner bit. */
+ dma_rmb();
+
+ dd = &lp->rx_descs[lp->rx_cur];
+ len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+ skb = lp->rx_skb[lp->rx_cur].skb;
+
+ /* Unmap old buffer */
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[lp->rx_cur].mapping,
+ lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+ /* Discard packet on reception error or bad checksum */
+ if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+ (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ } else {
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+ case DWCEQOS_DMA_RDES1_PT_UDP:
+ case DWCEQOS_DMA_RDES1_PT_TCP:
+ case DWCEQOS_DMA_RDES1_PT_ICMP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+ }
+
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(lp))
+ netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+ lp->rx_descs[lp->rx_cur].des3);
+ } else {
+ tot_size += skb->len;
+ n_packets++;
+
+ netif_receive_skb(skb);
+ }
+
+ lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+ lp->rx_descs[lp->rx_cur].des1 = 0;
+ lp->rx_descs[lp->rx_cur].des2 = 0;
+ /* The DMA must observe des0/1/2 written before des3. */
+ wmb();
+ lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+ DWCEQOS_DMA_RDES3_OWN |
+ DWCEQOS_DMA_RDES3_BUF1V;
+
+ lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+ lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+ lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+ n_descs++;
+ lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+ }
+
+ /* Make sure any ownership update is written to the descriptors before
+ * DMA wakeup.
+ */
+ wmb();
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+ /* Wake up RX by writing tail pointer */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+ lp->rx_descs_tail_addr);
+
+ return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct net_local *lp = container_of(napi, struct net_local, napi);
+ int work_done;
+
+ work_done = dwceqos_rx(lp, budget);
+
+ if (!dwceqos_packet_avail(lp) && work_done < budget) {
+ napi_complete(napi);
+ dwceqos_dma_enable_rxirq(lp);
+ } else {
+ work_done = budget;
+ }
+
+ return work_done;
+}
+
+/* Reinitialize the hardware after a TX timeout */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+ struct net_local *lp = container_of(data, struct net_local,
+ txtimeout_reinit);
+
+ netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+ DWCEQOS_TX_TIMEOUT);
+
+ if (netif_msg_hw(lp))
+ print_status(lp);
+
+ rtnl_lock();
+ dwceqos_stop(lp->ndev);
+ dwceqos_open(lp->ndev);
+ rtnl_unlock();
+}
+
+/* DT Probing function called by main probe */
+static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct net_local *lp;
+ const void *mac_address;
+ struct dwceqos_bus_cfg *bus_cfg;
+ struct device_node *np = pdev->dev.of_node;
+
+ ndev = platform_get_drvdata(pdev);
+ lp = netdev_priv(ndev);
+ bus_cfg = &lp->bus_cfg;
+
+ /* Set the MAC address. */
+ mac_address = of_get_mac_address(pdev->dev.of_node);
+ if (mac_address)
+ ether_addr_copy(ndev->dev_addr, mac_address);
+
+ /* These are all optional parameters */
+ lp->en_tx_lpi_clockgating = of_property_read_bool(np,
+ "snps,en-tx-lpi-clockgating");
+ bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+ of_property_read_u32(np, "snps,write-requests",
+ &bus_cfg->write_requests);
+ of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+ of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+ of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+ of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+ netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
+ bus_cfg->en_lpi,
+ bus_cfg->write_requests,
+ bus_cfg->read_requests,
+ bus_cfg->burst_map,
+ bus_cfg->rx_pbl,
+ bus_cfg->tx_pbl);
+
+ return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int res;
+
+ dwceqos_reset_state(lp);
+ res = dwceqos_descriptor_init(lp);
+ if (res) {
+ netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+ return res;
+ }
+ netdev_reset_queue(ndev);
+
+ napi_enable(&lp->napi);
+ phy_start(lp->phy_dev);
+ dwceqos_init_hw(lp);
+
+ netif_start_queue(ndev);
+ tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+ return 0;
+}
+
+static bool dwceqos_is_tx_dma_suspended(struct net_local *lp)
+{
+ u32 reg;
+
+ reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+ reg = DMA_GET_TX_STATE_CH0(reg);
+
+ return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+ /* Wait for all pending TX buffers to be sent. Upper limit based
+ * on max frame size on a 10 Mbit link.
+ */
+ size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
+
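+ /* A maximum-size frame takes roughly 1.25 ms at 10 Mbit/s and each
+ * poll sleeps for at least 0.1 ms, hence the 1250/100 factor above.
+ */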
+ while (!dwceqos_is_tx_dma_suspended(lp) && limit--)
+ usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ phy_stop(lp->phy_dev);
+
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi);
+
+ dwceqos_drain_dma(lp);
+
+ netif_tx_lock(lp->ndev);
+ dwceqos_reset_hw(lp);
+ dwceqos_descriptor_free(lp);
+ netif_tx_unlock(lp->ndev);
+
+ return 0;
+}
+
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+ unsigned short gso_size)
+{
+ struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+ dd->des0 = 0;
+ dd->des1 = 0;
+ dd->des2 = gso_size;
+ dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+ lp->tx_descs_tail_addr);
+}
+
+struct dwceqos_tx {
+ size_t nr_descriptors;
+ size_t initial_descriptor;
+ size_t last_descriptor;
+ size_t prev_gso_size;
+ size_t network_header_len;
+};
+
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ size_t n = 1;
+ size_t i;
+
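+ /* One descriptor covers the linear data; a context descriptor is
+ * only needed when the MSS changed since the previous GSO packet.
+ */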
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+ ++n;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+ BYTES_PER_DMA_DESC;
+ }
+
+ tx->nr_descriptors = n;
+ tx->initial_descriptor = lp->tx_next;
+ tx->last_descriptor = lp->tx_next;
+ tx->prev_gso_size = lp->gso_size;
+
+ tx->network_header_len = skb_transport_offset(skb);
+ if (skb_is_gso(skb))
+ tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ struct ring_desc *rd;
+ struct dwceqos_dma_desc *dd;
+ size_t payload_len;
+ dma_addr_t dma_handle;
+
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+ dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+ lp->gso_size = skb_shinfo(skb)->gso_size;
+ }
+
+ dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+ netdev_err(lp->ndev, "TX DMA Mapping error\n");
+ return -ENOMEM;
+ }
+
+ rd = &lp->tx_skb[lp->tx_next];
+ dd = &lp->tx_descs[lp->tx_next];
+
+ rd->skb = NULL;
+ rd->len = skb_headlen(skb);
+ rd->mapping = dma_handle;
+
+ /* Set up DMA Descriptor */
+ dd->des0 = dma_handle;
+
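+ /* For TSO the headers and payload are described separately: des0
+ * addresses the headers, des1 the start of the payload, and des3
+ * carries the total payload length and the TCP header length.
+ */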
+ if (skb_is_gso(skb)) {
+ payload_len = skb_headlen(skb) - tx->network_header_len;
+
+ if (payload_len)
+ dd->des1 = dma_handle + tx->network_header_len;
+ dd->des2 = tx->network_header_len |
+ DWCEQOS_DMA_DES2_B2L(payload_len);
+ dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+ DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+ (skb->len - tx->network_header_len);
+ } else {
+ dd->des1 = 0;
+ dd->des2 = skb_headlen(skb);
+ dd->des3 = skb->len;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_PARTIAL:
+ dd->des3 |= DWCEQOS_DMA_TDES3_CA;
+ break;
+ case CHECKSUM_NONE:
+ case CHECKSUM_UNNECESSARY:
+ case CHECKSUM_COMPLETE:
+ default:
+ break;
+ }
+ }
+
+ dd->des3 |= DWCEQOS_DMA_TDES3_FD;
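+ /* Leave OWN clear on the first descriptor; dwceqos_tx_finalize sets
+ * it last so the DMA never sees a partially written chain.
+ */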
+ if (lp->tx_next != tx->initial_descriptor)
+ dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ tx->last_descriptor = lp->tx_next;
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+ return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ struct ring_desc *rd = NULL;
+ struct dwceqos_dma_desc *dd;
+ dma_addr_t dma_handle;
+ size_t i;
+
+ /* Set up additional ring and DMA descriptors if the packet is fragmented */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ size_t frag_size;
+ size_t consumed_size;
+
+ /* Map DMA Area */
+ dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+ netdev_err(lp->ndev, "DMA Mapping error\n");
+ return -ENOMEM;
+ }
+
+ /* A descriptor buffer holds at most 16376 bytes, so large
+ * (e.g. order-3) fragments span more than one descriptor.
+ */
+ frag_size = skb_frag_size(frag);
+ consumed_size = 0;
+ while (consumed_size < frag_size) {
+ size_t dma_size = min_t(size_t, 16376,
+ frag_size - consumed_size);
+
+ rd = &lp->tx_skb[lp->tx_next];
+ memset(rd, 0, sizeof(*rd));
+
+ dd = &lp->tx_descs[lp->tx_next];
+
+ /* Set DMA Descriptor fields */
+ dd->des0 = dma_handle;
+ dd->des1 = 0;
+ dd->des2 = dma_size;
+
+ if (skb_is_gso(skb))
+ dd->des3 = (skb->len - tx->network_header_len);
+ else
+ dd->des3 = skb->len;
+
+ dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ tx->last_descriptor = lp->tx_next;
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+ consumed_size += dma_size;
+ }
+
+ rd->len = skb_frag_size(frag);
+ rd->mapping = dma_handle;
+ }
+
+ return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+ lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+ lp->tx_skb[tx->last_descriptor].skb = skb;
+
+ /* Make all descriptor updates visible to the DMA before setting the
+ * owner bit.
+ */
+ wmb();
+
+ lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ /* Make the owner bit visible before TX wakeup. */
+ wmb();
+
+ dwceqos_tx_poll_demand(lp);
+}
+
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+ size_t i = tx->initial_descriptor;
+
+ while (i != lp->tx_next) {
+ if (lp->tx_skb[i].mapping)
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+
+ lp->tx_skb[i].mapping = 0;
+ lp->tx_skb[i].skb = NULL;
+
+ memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+ i = (i + 1) % DWCEQOS_TX_DCNT;
+ }
+
+ lp->tx_next = tx->initial_descriptor;
+ lp->gso_size = tx->prev_gso_size;
+}
+
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct dwceqos_tx trans;
+ int err;
+
+ dwceqos_tx_prepare(skb, lp, &trans);
+ if (lp->tx_free < trans.nr_descriptors) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ err = dwceqos_tx_linear(skb, lp, &trans);
+ if (err)
+ goto tx_error;
+
+ err = dwceqos_tx_frags(skb, lp, &trans);
+ if (err)
+ goto tx_error;
+
+ WARN_ON(lp->tx_next !=
+ ((trans.initial_descriptor + trans.nr_descriptors) %
+ DWCEQOS_TX_DCNT));
+
+ dwceqos_tx_finalize(skb, lp, &trans);
+
+ netdev_sent_queue(ndev, skb->len);
+
+ spin_lock_bh(&lp->tx_lock);
+ lp->tx_free -= trans.nr_descriptors;
+ spin_unlock_bh(&lp->tx_lock);
+
+ ndev->trans_start = jiffies;
+ return NETDEV_TX_OK;
+
+tx_error:
+ dwceqos_tx_rollback(lp, &trans);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+ return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+ data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+ /* Do not disable MAC address 0 */
+ if (reg_n != 0)
+ dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval = 0;
+ u32 mc_filter[2];
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+ unsigned int max_mac_addr;
+
+ max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+ if (ndev->flags & IFF_PROMISC) {
+ regval = DWCEQOS_MAC_PKT_FILT_PR;
+ } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
+ (ndev->flags & IFF_ALLMULTI))) {
+ regval = DWCEQOS_MAC_PKT_FILT_PM;
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
+ } else if (!netdev_mc_empty(ndev)) {
+ regval = DWCEQOS_MAC_PKT_FILT_HMC;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table
+ */
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+ /* The most significant bit determines the register
+ * to use (H/L) while the other 5 bits determine
+ * the bit within the register.
+ */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
+ }
+ if (netdev_uc_count(ndev) > max_mac_addr) {
+ regval |= DWCEQOS_MAC_PKT_FILT_PR;
+ } else {
+ netdev_for_each_uc_addr(ha, ndev) {
+ dwceqos_set_umac_addr(lp, ha->addr, reg);
+ reg++;
+ }
+ for (; reg < max_mac_addr; reg++)
+ dwceqos_disable_umac_addr(lp, reg);
+ }
+ dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ dwceqos_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+ u32 tx_mask)
+{
+ if (tx_mask & BIT(27))
+ lp->mmc_counters.txlpitranscntr +=
+ dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+ if (tx_mask & BIT(26))
+ lp->mmc_counters.txpiuscntr +=
+ dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+ if (tx_mask & BIT(25))
+ lp->mmc_counters.txoversize_g +=
+ dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+ if (tx_mask & BIT(24))
+ lp->mmc_counters.txvlanpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+ if (tx_mask & BIT(23))
+ lp->mmc_counters.txpausepackets +=
+ dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+ if (tx_mask & BIT(22))
+ lp->mmc_counters.txexcessdef +=
+ dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+ if (tx_mask & BIT(21))
+ lp->mmc_counters.txpacketcount_g +=
+ dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+ if (tx_mask & BIT(20))
+ lp->mmc_counters.txoctetcount_g +=
+ dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+ if (tx_mask & BIT(19))
+ lp->mmc_counters.txcarriererror +=
+ dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+ if (tx_mask & BIT(18))
+ lp->mmc_counters.txexcesscol +=
+ dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+ if (tx_mask & BIT(17))
+ lp->mmc_counters.txlatecol +=
+ dwceqos_read(lp, DWC_MMC_TXLATECOL);
+ if (tx_mask & BIT(16))
+ lp->mmc_counters.txdeferred +=
+ dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+ if (tx_mask & BIT(15))
+ lp->mmc_counters.txmulticol_g +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+ if (tx_mask & BIT(14))
+ lp->mmc_counters.txsinglecol_g +=
+ dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+ if (tx_mask & BIT(13))
+ lp->mmc_counters.txunderflowerror +=
+ dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+ if (tx_mask & BIT(12))
+ lp->mmc_counters.txbroadcastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+ if (tx_mask & BIT(11))
+ lp->mmc_counters.txmulticastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+ if (tx_mask & BIT(10))
+ lp->mmc_counters.txunicastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+ if (tx_mask & BIT(9))
+ lp->mmc_counters.tx1024tomaxoctets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+ if (tx_mask & BIT(8))
+ lp->mmc_counters.tx512to1023octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+ if (tx_mask & BIT(7))
+ lp->mmc_counters.tx256to511octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+ if (tx_mask & BIT(6))
+ lp->mmc_counters.tx128to255octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+ if (tx_mask & BIT(5))
+ lp->mmc_counters.tx65to127octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+ if (tx_mask & BIT(4))
+ lp->mmc_counters.tx64octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+ if (tx_mask & BIT(3))
+ lp->mmc_counters.txmulticastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+ if (tx_mask & BIT(2))
+ lp->mmc_counters.txbroadcastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+ if (tx_mask & BIT(1))
+ lp->mmc_counters.txpacketcount_gb +=
+ dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+ if (tx_mask & BIT(0))
+ lp->mmc_counters.txoctetcount_gb +=
+ dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+ if (rx_mask & BIT(27))
+ lp->mmc_counters.rxlpitranscntr +=
+ dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+ if (rx_mask & BIT(26))
+ lp->mmc_counters.rxlpiuscntr +=
+ dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+ if (rx_mask & BIT(25))
+ lp->mmc_counters.rxctrlpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+ if (rx_mask & BIT(24))
+ lp->mmc_counters.rxrcverror +=
+ dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+ if (rx_mask & BIT(23))
+ lp->mmc_counters.rxwatchdog +=
+ dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+ if (rx_mask & BIT(22))
+ lp->mmc_counters.rxvlanpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+ if (rx_mask & BIT(21))
+ lp->mmc_counters.rxfifooverflow +=
+ dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+ if (rx_mask & BIT(20))
+ lp->mmc_counters.rxpausepackets +=
+ dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+ if (rx_mask & BIT(19))
+ lp->mmc_counters.rxoutofrangetype +=
+ dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+ if (rx_mask & BIT(18))
+ lp->mmc_counters.rxlengtherror +=
+ dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+ if (rx_mask & BIT(17))
+ lp->mmc_counters.rxunicastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+ if (rx_mask & BIT(16))
+ lp->mmc_counters.rx1024tomaxoctets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+ if (rx_mask & BIT(15))
+ lp->mmc_counters.rx512to1023octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+ if (rx_mask & BIT(14))
+ lp->mmc_counters.rx256to511octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+ if (rx_mask & BIT(13))
+ lp->mmc_counters.rx128to255octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+ if (rx_mask & BIT(12))
+ lp->mmc_counters.rx65to127octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+ if (rx_mask & BIT(11))
+ lp->mmc_counters.rx64octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+ if (rx_mask & BIT(10))
+ lp->mmc_counters.rxoversize_g +=
+ dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+ if (rx_mask & BIT(9))
+ lp->mmc_counters.rxundersize_g +=
+ dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+ if (rx_mask & BIT(8))
+ lp->mmc_counters.rxjabbererror +=
+ dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+ if (rx_mask & BIT(7))
+ lp->mmc_counters.rxrunterror +=
+ dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+ if (rx_mask & BIT(6))
+ lp->mmc_counters.rxalignmenterror +=
+ dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+ if (rx_mask & BIT(5))
+ lp->mmc_counters.rxcrcerror +=
+ dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+ if (rx_mask & BIT(4))
+ lp->mmc_counters.rxmulticastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+ if (rx_mask & BIT(3))
+ lp->mmc_counters.rxbroadcastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+ if (rx_mask & BIT(2))
+ lp->mmc_counters.rxoctetcount_g +=
+ dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+ if (rx_mask & BIT(1))
+ lp->mmc_counters.rxoctetcount_gb +=
+ dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+ if (rx_mask & BIT(0))
+ lp->mmc_counters.rxpacketcount_gb +=
+ dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
+
+static struct rtnl_link_stats64*
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+ unsigned long flags;
+ struct net_local *lp = netdev_priv(ndev);
+ struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+ dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+ lp->mmc_tx_counters_mask);
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+ s->rx_packets = hwstats->rxpacketcount_gb;
+ s->rx_bytes = hwstats->rxoctetcount_gb;
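+ /* The _gb counters include bad frames; subtracting the good
+ * unicast, multicast and broadcast counts leaves the errors.
+ */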
+ s->rx_errors = hwstats->rxpacketcount_gb -
+ hwstats->rxbroadcastpackets_g -
+ hwstats->rxmulticastpackets_g -
+ hwstats->rxunicastpackets_g;
+ s->multicast = hwstats->rxmulticastpackets_g;
+ s->rx_length_errors = hwstats->rxlengtherror;
+ s->rx_crc_errors = hwstats->rxcrcerror;
+ s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+ s->tx_packets = hwstats->txpacketcount_gb;
+ s->tx_bytes = hwstats->txoctetcount_gb;
+
+ if (lp->mmc_tx_counters_mask & BIT(21))
+ s->tx_errors = hwstats->txpacketcount_gb -
+ hwstats->txpacketcount_g;
+ else
+ s->tx_errors = hwstats->txunderflowerror +
+ hwstats->txcarriererror;
+
+ return s;
+}
+
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ strlcpy(ed->driver, lp->pdev->dev.driver->name, sizeof(ed->driver));
+ strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ pp->autoneg = lp->flowcontrol.autoneg;
+ pp->tx_pause = lp->flowcontrol.tx;
+ pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int ret = 0;
+
+ lp->flowcontrol.autoneg = pp->autoneg;
+ if (pp->autoneg) {
+ lp->phy_dev->advertising |= ADVERTISED_Pause;
+ lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+ } else {
+ lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+ lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+ lp->flowcontrol.rx = pp->rx_pause;
+ lp->flowcontrol.tx = pp->tx_pause;
+ }
+
+ if (netif_running(ndev))
+ ret = phy_start_aneg(lp->phy_dev);
+
+ return ret;
+}
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+ u8 *data)
+{
+ size_t i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+ memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ unsigned long flags;
+ size_t i;
+ u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+ dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+ lp->mmc_tx_counters_mask);
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+ memcpy(data,
+ mmcstat + dwceqos_ethtool_stats[i].offset,
+ sizeof(u64));
+ data++;
+ }
+}
+
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+ return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *space)
+{
+ const struct net_local *lp = netdev_priv(dev);
+ u32 *reg_space = (u32 *)space;
+ int reg_offset;
+ int reg_ix = 0;
+
+ /* MAC registers */
+ for (reg_offset = START_MAC_REG_OFFSET;
+ reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+ /* MTL registers */
+ for (reg_offset = START_MTL_REG_OFFSET;
+ reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+
+ /* DMA registers */
+ for (reg_offset = START_DMA_REG_OFFSET;
+ reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+
+ BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+ return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+ return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 lpi_status;
+ u32 lpi_enabled;
+
+ if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+ return -EOPNOTSUPP;
+
+ edata->eee_active = lp->eee_active;
+ edata->eee_enabled = lp->eee_enabled;
+ edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+ lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+ edata->tx_lpi_enabled = lpi_enabled;
+
+ if (netif_msg_hw(lp)) {
+ u32 regval;
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+ netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+ dwceqos_get_rx_lpi_state(regval),
+ dwceqos_get_tx_lpi_state(regval));
+ }
+
+ return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+ unsigned long flags;
+
+ if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+ return -EOPNOTSUPP;
+
+ if (edata->eee_enabled && !lp->eee_active)
+ return -EOPNOTSUPP;
+
+ if (edata->tx_lpi_enabled) {
+ if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+ edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+ return -EINVAL;
+ }
+
+ lp->eee_enabled = edata->eee_enabled;
+
+ if (edata->eee_enabled && edata->tx_lpi_enabled) {
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+ edata->tx_lpi_timer);
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ if (lp->en_tx_lpi_clockgating)
+ regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ }
+
+ return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ lp->msg_enable = msglevel;
+}
+
+static const struct ethtool_ops dwceqos_ethtool_ops = {
+ .get_settings = dwceqos_get_settings,
+ .set_settings = dwceqos_set_settings,
+ .get_drvinfo = dwceqos_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = dwceqos_get_pauseparam,
+ .set_pauseparam = dwceqos_set_pauseparam,
+ .get_strings = dwceqos_get_strings,
+ .get_ethtool_stats = dwceqos_get_ethtool_stats,
+ .get_sset_count = dwceqos_get_sset_count,
+ .get_regs = dwceqos_get_regs,
+ .get_regs_len = dwceqos_get_regs_len,
+ .get_eee = dwceqos_get_eee,
+ .set_eee = dwceqos_set_eee,
+ .get_msglevel = dwceqos_get_msglevel,
+ .set_msglevel = dwceqos_set_msglevel,
+};
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = dwceqos_open,
+ .ndo_stop = dwceqos_stop,
+ .ndo_start_xmit = dwceqos_start_xmit,
+ .ndo_set_rx_mode = dwceqos_set_rx_mode,
+ .ndo_set_mac_address = dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = dwceqos_poll_controller,
+#endif
+ .ndo_do_ioctl = dwceqos_ioctl,
+ .ndo_tx_timeout = dwceqos_tx_timeout,
+ .ndo_get_stats64 = dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem = NULL;
+ struct net_device *ndev;
+ struct net_local *lp;
+ int ret = -ENXIO;
+
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_mem) {
+ dev_err(&pdev->dev, "no IO resource defined.\n");
+ return -ENXIO;
+ }
+
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev) {
+ dev_err(&pdev->dev, "etherdev allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->pdev = pdev;
+ lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+ spin_lock_init(&lp->tx_lock);
+ spin_lock_init(&lp->hw_lock);
+ spin_lock_init(&lp->stats_lock);
+
+ lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(lp->apb_pclk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ ret = PTR_ERR(lp->apb_pclk);
+ goto err_out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(lp->apb_pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+ goto err_out_free_netdev;
+ }
+
+ lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->baseaddr)) {
+ dev_err(&pdev->dev, "failed to map baseaddress.\n");
+ ret = PTR_ERR(lp->baseaddr);
+ goto err_out_clk_dis_aper;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+ ndev->netdev_ops = &netdev_ops;
+ ndev->ethtool_ops = &dwceqos_ethtool_ops;
+ ndev->base_addr = r_mem->start;
+
+ dwceqos_get_hwfeatures(lp);
+ dwceqos_mdio_set_csr(lp);
+
+ ndev->hw_features = NETIF_F_SG;
+
+ if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+ ndev->hw_features |= NETIF_F_RXCSUM;
+
+ ndev->features = ndev->hw_features;
+
+ netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clk_dis_aper;
+ }
+
+ lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(lp->phy_ref_clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ ret = PTR_ERR(lp->phy_ref_clk);
+ goto err_out_unregister_netdev;
+ }
+
+ ret = clk_prepare_enable(lp->phy_ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto err_out_unregister_netdev;
+ }
+
+ lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+ "phy-handle", 0);
+ if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+ ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "invalid fixed-link");
+ goto err_out_unregister_netdev;
+ }
+
+ lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+ }
+
+ ret = of_get_phy_mode(lp->pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ lp->phy_interface = ret;
+
+ ret = dwceqos_mii_init(lp);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ ret = dwceqos_mii_probe(ndev);
+ if (ret != 0) {
+ netdev_err(ndev, "mii_probe fail.\n");
+ ret = -ENXIO;
+ goto err_out_unregister_clk_notifier;
+ }
+
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+ tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+ (unsigned long)ndev);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+ lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+ INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+ platform_set_drvdata(pdev, ndev);
+ ret = dwceqos_probe_config_dt(pdev);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+ ret);
+ goto err_out_unregister_clk_notifier;
+ }
+ dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+ pdev->id, ndev->base_addr, ndev->irq);
+
+ ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+ ndev->irq, ret);
+ goto err_out_unregister_clk_notifier;
+ }
+
+ if (netif_msg_probe(lp))
+ netdev_dbg(ndev, "net_local@%p\n", lp);
+
+ return 0;
+
+err_out_unregister_clk_notifier:
+ clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+ unregister_netdev(ndev);
+err_out_clk_dis_aper:
+ clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp;
+
+ if (ndev) {
+ lp = netdev_priv(ndev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+
+ unregister_netdev(ndev);
+
+ clk_disable_unprepare(lp->phy_ref_clk);
+ clk_disable_unprepare(lp->apb_pclk);
+
+ free_netdev(ndev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+ .probe = dwceqos_probe,
+ .remove = dwceqos_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dwceq_of_match,
+ },
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d155bf2573cd..3b81b39bea6f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -365,7 +365,8 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct napi_struct napi;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
struct device *dev;
struct cpsw_platform_data data;
struct cpsw_ss_regs __iomem *regs;
@@ -389,7 +390,6 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
- bool irq_enabled;
struct cpts *cpts;
u32 emac_port;
};
@@ -752,13 +752,10 @@ static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ writel(0, &priv->wr_regs->tx_en);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
- cpdma_chan_process(priv->txch, 128);
-
- priv = cpsw_get_slave_priv(priv, 1);
- if (priv)
- cpdma_chan_process(priv->txch, 128);
+ napi_schedule(&priv->napi_tx);
return IRQ_HANDLED;
}
@@ -767,45 +764,38 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
struct cpsw_priv *priv = dev_id;
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ writel(0, &priv->wr_regs->rx_en);
- cpsw_intr_disable(priv);
- if (priv->irq_enabled == true) {
- disable_irq_nosync(priv->irqs_table[0]);
- priv->irq_enabled = false;
- }
+ napi_schedule(&priv->napi_rx);
+ return IRQ_HANDLED;
+}
+
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_priv *priv = napi_to_priv(napi_tx);
+ int num_tx;
- if (netif_running(priv->ndev)) {
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
+ num_tx = cpdma_chan_process(priv->txch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &priv->wr_regs->tx_en);
}
- priv = cpsw_get_slave_priv(priv, 1);
- if (!priv)
- return IRQ_NONE;
+ if (num_tx)
+ cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
- if (netif_running(priv->ndev)) {
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ return num_tx;
}
-static int cpsw_poll(struct napi_struct *napi, int budget)
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct cpsw_priv *priv = napi_to_priv(napi);
+ struct cpsw_priv *priv = napi_to_priv(napi_rx);
int num_rx;
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
- struct cpsw_priv *prim_cpsw;
-
- napi_complete(napi);
- cpsw_intr_enable(priv);
- prim_cpsw = cpsw_get_slave_priv(priv, 0);
- if (prim_cpsw->irq_enabled == false) {
- prim_cpsw->irq_enabled = true;
- enable_irq(priv->irqs_table[0]);
- }
+ napi_complete(napi_rx);
+ writel(0xff, &priv->wr_regs->rx_en);
}
if (num_rx)
@@ -1230,7 +1220,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_priv *prim_cpsw;
int i, ret;
u32 reg;
@@ -1260,6 +1249,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
ALE_ALL_PORTS << priv->host_port, 0, 0);
if (!cpsw_common_res_usage_state(priv)) {
+ struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
/* setup tx dma to fixed prio and zero offset */
cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
@@ -1273,6 +1264,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
/* Enable internal fifo flow control */
writel(0x7, &priv->regs->flow_control);
+ napi_enable(&priv_sl0->napi_rx);
+ napi_enable(&priv_sl0->napi_tx);
+
if (WARN_ON(!priv->data.rx_descs))
priv->data.rx_descs = 128;
@@ -1311,18 +1305,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_set_coalesce(ndev, &coal);
}
- napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- prim_cpsw = cpsw_get_slave_priv(priv, 0);
- if (prim_cpsw->irq_enabled == false) {
- if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
- prim_cpsw->irq_enabled = true;
- enable_irq(prim_cpsw->irqs_table[0]);
- }
- }
-
if (priv->data.dual_emac)
priv->slaves[priv->emac_port].open_stat = true;
return 0;
@@ -1341,10 +1326,13 @@ static int cpsw_ndo_stop(struct net_device *ndev)
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
netif_stop_queue(priv->ndev);
- napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
if (cpsw_common_res_usage_state(priv) <= 1) {
+ struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
+ napi_disable(&priv_sl0->napi_rx);
+ napi_disable(&priv_sl0->napi_tx);
cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
cpdma_ctlr_stop(priv->dma);
@@ -2127,7 +2115,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2169,7 +2156,6 @@ static int cpsw_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
- priv->irq_enabled = true;
if (!priv->cpts) {
dev_err(&pdev->dev, "error allocating cpts\n");
ret = -ENOMEM;
@@ -2380,7 +2366,8 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
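The conversion above moves cpsw to the standard NAPI contract: the hard interrupt masks its own source and schedules the poller, and the poller re-enables the interrupt only after it completes under budget. A small userspace model of that control flow, assuming the tx_en/rx_en mask registers shown in the hunk (the kernel calls appear only in comments):

#include <stdio.h>
#include <stdbool.h>

static int pending;		/* packets waiting in the model "ring" */
static bool masked;		/* models the rx_en interrupt mask */
static bool scheduled;		/* models the NAPI scheduled state */

static void rx_interrupt(void)
{
	masked = true;		/* writel(0, &priv->wr_regs->rx_en) */
	scheduled = true;	/* napi_schedule(&priv->napi_rx) */
}

static int rx_poll(int budget)
{
	int done = pending < budget ? pending : budget;

	pending -= done;
	if (done < budget) {	/* ring drained: complete and unmask */
		scheduled = false;	/* napi_complete() */
		masked = false;		/* writel(0xff, &priv->wr_regs->rx_en) */
	}
	return done;
}

int main(void)
{
	pending = 150;
	rx_interrupt();
	while (scheduled) {	/* softirq would re-invoke the poller */
		int done = rx_poll(64);

		printf("polled %d, %d left\n", done, pending);
	}
	printf("interrupt unmasked: %s\n", masked ? "no" : "yes");
	return 0;
}

Keeping the interrupt masked while the poller still hits its budget is what lets the driver batch work instead of taking one interrupt per packet.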
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 4755838c6137..1a5aca55ea9f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -52,6 +52,8 @@
NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
NETIF_MSG_RX_STATUS)
+#define NETCP_EFUSE_ADDR_SWAP 2
+
#define knav_queue_get_id(q) knav_queue_device_control(q, \
KNAV_QUEUE_GET_ID, (unsigned long)NULL)
@@ -173,13 +175,22 @@ static void set_words(u32 *words, int num_words, u32 *desc)
}
/* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
unsigned int addr0, addr1;
addr1 = readl(efuse_mac + 4);
addr0 = readl(efuse_mac);
+ switch (swap) {
+ case NETCP_EFUSE_ADDR_SWAP:
+ addr0 = addr1;
+ addr1 = readl(efuse_mac);
+ break;
+ default:
+ break;
+ }
+
x[0] = (addr1 & 0x0000ff00) >> 8;
x[1] = addr1 & 0x000000ff;
x[2] = (addr0 & 0xff000000) >> 24;
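The unpacking plus the new swap can be modelled outside the kernel as below. The bytes x[3]..x[5] fall outside the hunk; the model assumes they follow the same descending-byte layout, and the sample input values are made up:

#include <stdio.h>
#include <stdint.h>

/* The e-fuse holds the MAC as two 32-bit words; swap == 2
 * (NETCP_EFUSE_ADDR_SWAP) means the SoC stores them in the
 * opposite order.
 */
static void efuse_to_mac(uint8_t mac[6], uint32_t addr0, uint32_t addr1,
			 int swap)
{
	if (swap == 2) {
		uint32_t tmp = addr0;

		addr0 = addr1;
		addr1 = tmp;
	}
	mac[0] = (addr1 >> 8) & 0xff;
	mac[1] = addr1 & 0xff;
	mac[2] = (addr0 >> 24) & 0xff;
	mac[3] = (addr0 >> 16) & 0xff;	/* assumed: same descending layout */
	mac[4] = (addr0 >> 8) & 0xff;
	mac[5] = addr0 & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	efuse_to_mac(mac, 0x33445566, 0x00001122, 0);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}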
@@ -1901,7 +1912,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
goto quit;
}
- emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+ emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
if (is_valid_ether_addr(efuse_mac_addr))
ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
else
@@ -2141,7 +2152,6 @@ MODULE_DEVICE_TABLE(of, of_match);
static struct platform_driver netcp_driver = {
.driver = {
.name = "netcp-1.0",
- .owner = THIS_MODULE,
.of_match_table = of_match,
},
.probe = netcp_probe,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 1974a8ae764a..6f16d6aaf7b7 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -295,8 +295,6 @@ struct xgbe_hw_stats {
u32 rx_dma_overruns;
};
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
struct gbenu_ss_regs {
u32 id_ver;
u32 synce_count; /* NU */
@@ -480,7 +478,6 @@ struct gbenu_hw_stats {
u32 tx_pri7_drop_bcnt;
};
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
#define GBENU_HW_STATS_REG_MAP_SZ 0x200
struct gbe_ss_regs {
@@ -615,7 +612,6 @@ struct gbe_hw_stats {
u32 rx_dma_overruns;
};
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100
@@ -646,6 +642,7 @@ struct gbe_priv {
bool enable_ale;
u8 max_num_slaves;
u8 max_num_ports; /* max_num_slaves + 1 */
+ u8 num_stats_mods;
struct netcp_tx_pipe tx_pipe;
int host_port;
@@ -675,6 +672,7 @@ struct gbe_priv {
struct net_device *dummy_ndev;
u64 *hw_stats;
+ u32 *hw_stats_prev;
const struct netcp_ethtool_stat *et_stats;
int num_et_stats;
/* Lock for updating the hwstats */
@@ -874,7 +872,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
};
/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE 33
+#define GBENU_ET_STATS_HOST_SIZE 52
#define GBENU_STATS_HOST(field) \
{ \
@@ -883,8 +881,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
offsetof(struct gbenu_hw_stats, field) \
}
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE 46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE 65
#define GBENU_STATS_P1(field) \
{ \
@@ -976,7 +974,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_HOST(ale_unknown_mcast_bytes),
GBENU_STATS_HOST(ale_unknown_bcast),
GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+ GBENU_STATS_HOST(ale_pol_match),
+ GBENU_STATS_HOST(ale_pol_match_red),
+ GBENU_STATS_HOST(ale_pol_match_yellow),
GBENU_STATS_HOST(tx_mem_protect_err),
+ GBENU_STATS_HOST(tx_pri0_drop),
+ GBENU_STATS_HOST(tx_pri1_drop),
+ GBENU_STATS_HOST(tx_pri2_drop),
+ GBENU_STATS_HOST(tx_pri3_drop),
+ GBENU_STATS_HOST(tx_pri4_drop),
+ GBENU_STATS_HOST(tx_pri5_drop),
+ GBENU_STATS_HOST(tx_pri6_drop),
+ GBENU_STATS_HOST(tx_pri7_drop),
+ GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri7_drop_bcnt),
/* GBENU Module 1 */
GBENU_STATS_P1(rx_good_frames),
GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1040,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P1(ale_unknown_mcast_bytes),
GBENU_STATS_P1(ale_unknown_bcast),
GBENU_STATS_P1(ale_unknown_bcast_bytes),
+ GBENU_STATS_P1(ale_pol_match),
+ GBENU_STATS_P1(ale_pol_match_red),
+ GBENU_STATS_P1(ale_pol_match_yellow),
GBENU_STATS_P1(tx_mem_protect_err),
+ GBENU_STATS_P1(tx_pri0_drop),
+ GBENU_STATS_P1(tx_pri1_drop),
+ GBENU_STATS_P1(tx_pri2_drop),
+ GBENU_STATS_P1(tx_pri3_drop),
+ GBENU_STATS_P1(tx_pri4_drop),
+ GBENU_STATS_P1(tx_pri5_drop),
+ GBENU_STATS_P1(tx_pri6_drop),
+ GBENU_STATS_P1(tx_pri7_drop),
+ GBENU_STATS_P1(tx_pri0_drop_bcnt),
+ GBENU_STATS_P1(tx_pri1_drop_bcnt),
+ GBENU_STATS_P1(tx_pri2_drop_bcnt),
+ GBENU_STATS_P1(tx_pri3_drop_bcnt),
+ GBENU_STATS_P1(tx_pri4_drop_bcnt),
+ GBENU_STATS_P1(tx_pri5_drop_bcnt),
+ GBENU_STATS_P1(tx_pri6_drop_bcnt),
+ GBENU_STATS_P1(tx_pri7_drop_bcnt),
/* GBENU Module 2 */
GBENU_STATS_P2(rx_good_frames),
GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1106,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P2(ale_unknown_mcast_bytes),
GBENU_STATS_P2(ale_unknown_bcast),
GBENU_STATS_P2(ale_unknown_bcast_bytes),
+ GBENU_STATS_P2(ale_pol_match),
+ GBENU_STATS_P2(ale_pol_match_red),
+ GBENU_STATS_P2(ale_pol_match_yellow),
GBENU_STATS_P2(tx_mem_protect_err),
+ GBENU_STATS_P2(tx_pri0_drop),
+ GBENU_STATS_P2(tx_pri1_drop),
+ GBENU_STATS_P2(tx_pri2_drop),
+ GBENU_STATS_P2(tx_pri3_drop),
+ GBENU_STATS_P2(tx_pri4_drop),
+ GBENU_STATS_P2(tx_pri5_drop),
+ GBENU_STATS_P2(tx_pri6_drop),
+ GBENU_STATS_P2(tx_pri7_drop),
+ GBENU_STATS_P2(tx_pri0_drop_bcnt),
+ GBENU_STATS_P2(tx_pri1_drop_bcnt),
+ GBENU_STATS_P2(tx_pri2_drop_bcnt),
+ GBENU_STATS_P2(tx_pri3_drop_bcnt),
+ GBENU_STATS_P2(tx_pri4_drop_bcnt),
+ GBENU_STATS_P2(tx_pri5_drop_bcnt),
+ GBENU_STATS_P2(tx_pri6_drop_bcnt),
+ GBENU_STATS_P2(tx_pri7_drop_bcnt),
/* GBENU Module 3 */
GBENU_STATS_P3(rx_good_frames),
GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1172,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P3(ale_unknown_mcast_bytes),
GBENU_STATS_P3(ale_unknown_bcast),
GBENU_STATS_P3(ale_unknown_bcast_bytes),
+ GBENU_STATS_P3(ale_pol_match),
+ GBENU_STATS_P3(ale_pol_match_red),
+ GBENU_STATS_P3(ale_pol_match_yellow),
GBENU_STATS_P3(tx_mem_protect_err),
+ GBENU_STATS_P3(tx_pri0_drop),
+ GBENU_STATS_P3(tx_pri1_drop),
+ GBENU_STATS_P3(tx_pri2_drop),
+ GBENU_STATS_P3(tx_pri3_drop),
+ GBENU_STATS_P3(tx_pri4_drop),
+ GBENU_STATS_P3(tx_pri5_drop),
+ GBENU_STATS_P3(tx_pri6_drop),
+ GBENU_STATS_P3(tx_pri7_drop),
+ GBENU_STATS_P3(tx_pri0_drop_bcnt),
+ GBENU_STATS_P3(tx_pri1_drop_bcnt),
+ GBENU_STATS_P3(tx_pri2_drop_bcnt),
+ GBENU_STATS_P3(tx_pri3_drop_bcnt),
+ GBENU_STATS_P3(tx_pri4_drop_bcnt),
+ GBENU_STATS_P3(tx_pri5_drop_bcnt),
+ GBENU_STATS_P3(tx_pri6_drop_bcnt),
+ GBENU_STATS_P3(tx_pri7_drop_bcnt),
/* GBENU Module 4 */
GBENU_STATS_P4(rx_good_frames),
GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1238,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P4(ale_unknown_mcast_bytes),
GBENU_STATS_P4(ale_unknown_bcast),
GBENU_STATS_P4(ale_unknown_bcast_bytes),
+ GBENU_STATS_P4(ale_pol_match),
+ GBENU_STATS_P4(ale_pol_match_red),
+ GBENU_STATS_P4(ale_pol_match_yellow),
GBENU_STATS_P4(tx_mem_protect_err),
+ GBENU_STATS_P4(tx_pri0_drop),
+ GBENU_STATS_P4(tx_pri1_drop),
+ GBENU_STATS_P4(tx_pri2_drop),
+ GBENU_STATS_P4(tx_pri3_drop),
+ GBENU_STATS_P4(tx_pri4_drop),
+ GBENU_STATS_P4(tx_pri5_drop),
+ GBENU_STATS_P4(tx_pri6_drop),
+ GBENU_STATS_P4(tx_pri7_drop),
+ GBENU_STATS_P4(tx_pri0_drop_bcnt),
+ GBENU_STATS_P4(tx_pri1_drop_bcnt),
+ GBENU_STATS_P4(tx_pri2_drop_bcnt),
+ GBENU_STATS_P4(tx_pri3_drop_bcnt),
+ GBENU_STATS_P4(tx_pri4_drop_bcnt),
+ GBENU_STATS_P4(tx_pri5_drop_bcnt),
+ GBENU_STATS_P4(tx_pri6_drop_bcnt),
+ GBENU_STATS_P4(tx_pri7_drop_bcnt),
/* GBENU Module 5 */
GBENU_STATS_P5(rx_good_frames),
GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1304,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P5(ale_unknown_mcast_bytes),
GBENU_STATS_P5(ale_unknown_bcast),
GBENU_STATS_P5(ale_unknown_bcast_bytes),
+ GBENU_STATS_P5(ale_pol_match),
+ GBENU_STATS_P5(ale_pol_match_red),
+ GBENU_STATS_P5(ale_pol_match_yellow),
GBENU_STATS_P5(tx_mem_protect_err),
+ GBENU_STATS_P5(tx_pri0_drop),
+ GBENU_STATS_P5(tx_pri1_drop),
+ GBENU_STATS_P5(tx_pri2_drop),
+ GBENU_STATS_P5(tx_pri3_drop),
+ GBENU_STATS_P5(tx_pri4_drop),
+ GBENU_STATS_P5(tx_pri5_drop),
+ GBENU_STATS_P5(tx_pri6_drop),
+ GBENU_STATS_P5(tx_pri7_drop),
+ GBENU_STATS_P5(tx_pri0_drop_bcnt),
+ GBENU_STATS_P5(tx_pri1_drop_bcnt),
+ GBENU_STATS_P5(tx_pri2_drop_bcnt),
+ GBENU_STATS_P5(tx_pri3_drop_bcnt),
+ GBENU_STATS_P5(tx_pri4_drop_bcnt),
+ GBENU_STATS_P5(tx_pri5_drop_bcnt),
+ GBENU_STATS_P5(tx_pri6_drop_bcnt),
+ GBENU_STATS_P5(tx_pri7_drop_bcnt),
/* GBENU Module 6 */
GBENU_STATS_P6(rx_good_frames),
GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1370,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P6(ale_unknown_mcast_bytes),
GBENU_STATS_P6(ale_unknown_bcast),
GBENU_STATS_P6(ale_unknown_bcast_bytes),
+ GBENU_STATS_P6(ale_pol_match),
+ GBENU_STATS_P6(ale_pol_match_red),
+ GBENU_STATS_P6(ale_pol_match_yellow),
GBENU_STATS_P6(tx_mem_protect_err),
+ GBENU_STATS_P6(tx_pri0_drop),
+ GBENU_STATS_P6(tx_pri1_drop),
+ GBENU_STATS_P6(tx_pri2_drop),
+ GBENU_STATS_P6(tx_pri3_drop),
+ GBENU_STATS_P6(tx_pri4_drop),
+ GBENU_STATS_P6(tx_pri5_drop),
+ GBENU_STATS_P6(tx_pri6_drop),
+ GBENU_STATS_P6(tx_pri7_drop),
+ GBENU_STATS_P6(tx_pri0_drop_bcnt),
+ GBENU_STATS_P6(tx_pri1_drop_bcnt),
+ GBENU_STATS_P6(tx_pri2_drop_bcnt),
+ GBENU_STATS_P6(tx_pri3_drop_bcnt),
+ GBENU_STATS_P6(tx_pri4_drop_bcnt),
+ GBENU_STATS_P6(tx_pri5_drop_bcnt),
+ GBENU_STATS_P6(tx_pri6_drop_bcnt),
+ GBENU_STATS_P6(tx_pri7_drop_bcnt),
/* GBENU Module 7 */
GBENU_STATS_P7(rx_good_frames),
GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1436,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P7(ale_unknown_mcast_bytes),
GBENU_STATS_P7(ale_unknown_bcast),
GBENU_STATS_P7(ale_unknown_bcast_bytes),
+ GBENU_STATS_P7(ale_pol_match),
+ GBENU_STATS_P7(ale_pol_match_red),
+ GBENU_STATS_P7(ale_pol_match_yellow),
GBENU_STATS_P7(tx_mem_protect_err),
+ GBENU_STATS_P7(tx_pri0_drop),
+ GBENU_STATS_P7(tx_pri1_drop),
+ GBENU_STATS_P7(tx_pri2_drop),
+ GBENU_STATS_P7(tx_pri3_drop),
+ GBENU_STATS_P7(tx_pri4_drop),
+ GBENU_STATS_P7(tx_pri5_drop),
+ GBENU_STATS_P7(tx_pri6_drop),
+ GBENU_STATS_P7(tx_pri7_drop),
+ GBENU_STATS_P7(tx_pri0_drop_bcnt),
+ GBENU_STATS_P7(tx_pri1_drop_bcnt),
+ GBENU_STATS_P7(tx_pri2_drop_bcnt),
+ GBENU_STATS_P7(tx_pri3_drop_bcnt),
+ GBENU_STATS_P7(tx_pri4_drop_bcnt),
+ GBENU_STATS_P7(tx_pri5_drop_bcnt),
+ GBENU_STATS_P7(tx_pri6_drop_bcnt),
+ GBENU_STATS_P7(tx_pri7_drop_bcnt),
/* GBENU Module 8 */
GBENU_STATS_P8(rx_good_frames),
GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1502,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P8(ale_unknown_mcast_bytes),
GBENU_STATS_P8(ale_unknown_bcast),
GBENU_STATS_P8(ale_unknown_bcast_bytes),
+ GBENU_STATS_P8(ale_pol_match),
+ GBENU_STATS_P8(ale_pol_match_red),
+ GBENU_STATS_P8(ale_pol_match_yellow),
GBENU_STATS_P8(tx_mem_protect_err),
+ GBENU_STATS_P8(tx_pri0_drop),
+ GBENU_STATS_P8(tx_pri1_drop),
+ GBENU_STATS_P8(tx_pri2_drop),
+ GBENU_STATS_P8(tx_pri3_drop),
+ GBENU_STATS_P8(tx_pri4_drop),
+ GBENU_STATS_P8(tx_pri5_drop),
+ GBENU_STATS_P8(tx_pri6_drop),
+ GBENU_STATS_P8(tx_pri7_drop),
+ GBENU_STATS_P8(tx_pri0_drop_bcnt),
+ GBENU_STATS_P8(tx_pri1_drop_bcnt),
+ GBENU_STATS_P8(tx_pri2_drop_bcnt),
+ GBENU_STATS_P8(tx_pri3_drop_bcnt),
+ GBENU_STATS_P8(tx_pri4_drop_bcnt),
+ GBENU_STATS_P8(tx_pri5_drop_bcnt),
+ GBENU_STATS_P8(tx_pri6_drop_bcnt),
+ GBENU_STATS_P8(tx_pri7_drop_bcnt),
};
#define XGBE_STATS0_INFO(field) \
@@ -1554,70 +1723,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
}
}
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+ u32 __iomem *p_stats_entry;
+ int i;
+
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ if (gbe_dev->et_stats[i].type == stats_mod) {
+ p_stats_entry = base + gbe_dev->et_stats[i].offset;
+ gbe_dev->hw_stats[i] = 0;
+ gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+ }
+ }
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+ int et_stats_entry)
{
void __iomem *base = NULL;
- u32 __iomem *p;
- u32 tmp = 0;
+ u32 __iomem *p_stats_entry;
+ u32 curr, delta;
+
+ /* The hw_stats_regs pointers are already
+ * properly set to point to the right base:
+ */
+ base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+ p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+ curr = readl(p_stats_entry);
+ delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+ gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+ gbe_dev->hw_stats[et_stats_entry] += delta;
+}
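The accumulation scheme above replaces the old write-to-decrement readout with delta accumulation against a saved snapshot; because the subtraction is done on u32 values, a hardware counter that wraps once between polls still produces the correct delta. A standalone model:

#include <stdio.h>
#include <stdint.h>

/* 64-bit total accumulated from a 32-bit counter via snapshot deltas;
 * unsigned wraparound keeps the delta correct across one overflow.
 */
static uint64_t total;
static uint32_t prev;

static void update(uint32_t curr)
{
	uint32_t delta = curr - prev;	/* wraps correctly in u32 */

	prev = curr;
	total += delta;
}

int main(void)
{
	prev = 0xfffffff0u;	/* snapshot taken just before overflow */
	update(0x00000010);	/* counter wrapped; true delta is 0x20 */
	printf("total = %llu\n", (unsigned long long)total);
	return 0;
}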
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
int i;
for (i = 0; i < gbe_dev->num_et_stats; i++) {
- base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
- p = base + gbe_dev->et_stats[i].offset;
- tmp = readl(p);
- gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+ gbe_update_hw_stats_entry(gbe_dev, i);
+
if (data)
data[i] = gbe_dev->hw_stats[i];
- /* write-to-decrement:
- * new register value = old register value - write value
- */
- writel(tmp, p);
}
}
-static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+ int stats_mod)
{
- void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
- void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
- u64 *hw_stats = &gbe_dev->hw_stats[0];
- void __iomem *base = NULL;
- u32 __iomem *p;
- u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
- int i, j, pair;
+ u32 val;
- for (pair = 0; pair < 2; pair++) {
- val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+ val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
- if (pair == 0)
- val &= ~GBE_STATS_CD_SEL;
- else
- val |= GBE_STATS_CD_SEL;
+ switch (stats_mod) {
+ case GBE_STATSA_MODULE:
+ case GBE_STATSB_MODULE:
+ val &= ~GBE_STATS_CD_SEL;
+ break;
+ case GBE_STATSC_MODULE:
+ case GBE_STATSD_MODULE:
+ val |= GBE_STATS_CD_SEL;
+ break;
+ default:
+ return;
+ }
- /* make the stat modules visible */
- writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+ /* make the stat module visible */
+ writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
- for (i = 0; i < pair_size; i++) {
- j = pair * pair_size + i;
- switch (gbe_dev->et_stats[j].type) {
- case GBE_STATSA_MODULE:
- case GBE_STATSC_MODULE:
- base = gbe_statsa;
- break;
- case GBE_STATSB_MODULE:
- case GBE_STATSD_MODULE:
- base = gbe_statsb;
- break;
- }
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+ gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+ u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+ int et_entry, j, pair;
+
+ for (pair = 0; pair < 2; pair++) {
+ gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+ GBE_STATSC_MODULE :
+ GBE_STATSA_MODULE));
+
+ for (j = 0; j < half_num_et_stats; j++) {
+ et_entry = pair * half_num_et_stats + j;
+ gbe_update_hw_stats_entry(gbe_dev, et_entry);
- p = base + gbe_dev->et_stats[j].offset;
- tmp = readl(p);
- hw_stats[j] += tmp;
if (data)
- data[j] = hw_stats[j];
- /* write-to-decrement:
- * new register value = old register value - write value
- */
- writel(tmp, p);
+ data[et_entry] = gbe_dev->hw_stats[et_entry];
}
}
}
@@ -2207,14 +2403,15 @@ static void netcp_ethss_timer(unsigned long arg)
netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}
- spin_lock_bh(&gbe_dev->hw_stats_lock);
+ /* Timer callbacks already run in BH context, so BHs need not be blocked here */
+ spin_lock(&gbe_dev->hw_stats_lock);
if (gbe_dev->ss_version == GBE_SS_VERSION_14)
gbe_update_stats_ver14(gbe_dev, NULL);
else
gbe_update_stats(gbe_dev, NULL);
- spin_unlock_bh(&gbe_dev->hw_stats_lock);
+ spin_unlock(&gbe_dev->hw_stats_lock);
gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
add_timer(&gbe_dev->timer);
@@ -2571,15 +2768,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
}
gbe_dev->xgbe_serdes_regs = regs;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = xgbe10_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- XGBE10_NUM_STAT_ENTRIES *
- (gbe_dev->max_num_ports) * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
gbe_dev->ss_version = XGBE_SS_VERSION_10;
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
XGBE10_SGMII_MODULE_OFFSET;
@@ -2593,8 +2803,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = xgbe10_et_stats;
- gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
@@ -2679,30 +2887,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
}
gbe_dev->switch_regs = regs;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+ gbe_dev->et_stats = gbe13_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- GBE13_NUM_HW_STAT_ENTRIES *
- gbe_dev->max_num_slaves * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
+ /* K2HK has only 2 hw stats modules visible at a time, so
+ * modules 0 & 2 point to one base and
+ * modules 1 & 3 point to the other base
+ */
for (i = 0; i < gbe_dev->max_num_slaves; i++) {
gbe_dev->hw_stats_regs[i] =
gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
- (GBE_HW_STATS_REG_MAP_SZ * i);
+ (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
}
gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBE13_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = gbe13_et_stats;
- gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
/* Subsystem registers */
@@ -2729,15 +2952,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
void __iomem *regs;
int i, ret;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = gbenu_et_stats;
+
+ if (IS_SS_ID_NU(gbe_dev))
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+ else
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ GBENU_ET_STATS_PORT_SIZE;
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- GBENU_NUM_HW_STAT_ENTRIES *
- (gbe_dev->max_num_ports) * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
@@ -2765,16 +3007,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBENU_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = gbenu_et_stats;
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
- if (IS_SS_ID_NU(gbe_dev))
- gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
- (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
- else
- gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
- GBENU_ET_STATS_PORT_SIZE;
-
/* Subsystem registers */
GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
@@ -2804,7 +3038,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
struct cpsw_ale_params ale_params;
struct gbe_priv *gbe_dev;
u32 slave_num;
- int ret = 0;
+ int i, ret = 0;
if (!node) {
dev_err(dev, "device tree info unavailable\n");
@@ -2951,6 +3185,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
/* initialize host port */
gbe_init_host_port(gbe_dev);
+ spin_lock_bh(&gbe_dev->hw_stats_lock);
+ for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ gbe_reset_mod_stats_ver14(gbe_dev, i);
+ else
+ gbe_reset_mod_stats(gbe_dev, i);
+ }
+ spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
init_timer(&gbe_dev->timer);
gbe_dev->timer.data = (unsigned long)gbe_dev;
gbe_dev->timer.function = netcp_ethss_timer;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dd4544085db3..5fa98f599b3d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -162,6 +162,7 @@ struct netvsc_device_info {
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
u32 max_num_vrss_chns;
+ u32 num_chn;
};
enum rndis_device_state {
@@ -541,6 +542,29 @@ union nvsp_2_message_uber {
struct nvsp_2_free_rxbuf free_rxbuf;
} __packed;
+struct nvsp_4_send_vf_association {
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 allocated;
+
+ /* Serial number of the VF to team with */
+ u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+ NVSP_DATAPATH_SYNTHETIC = 0,
+ NVSP_DATAPATH_VF,
+ NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+ u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+ struct nvsp_4_send_vf_association vf_assoc;
+ struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
enum nvsp_subchannel_operation {
NVSP_SUBCHANNEL_NONE = 0,
NVSP_SUBCHANNEL_ALLOCATE,
@@ -578,6 +602,7 @@ union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
union nvsp_2_message_uber v2_msg;
+ union nvsp_4_message_uber v4_msg;
union nvsp_5_message_uber v5_msg;
} __packed;
@@ -589,6 +614,7 @@ struct nvsp_message {
#define NETVSC_MTU 65536
+#define NETVSC_MTU_MIN 68
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
@@ -670,6 +696,8 @@ struct netvsc_device {
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
+ spinlock_t sc_lock; /* Protects num_sc_offered variable */
+ u32 num_sc_offered;
atomic_t queue_sends[NR_CPUS];
/* Holds rndis device info */
@@ -688,6 +716,11 @@ struct netvsc_device {
/* The net device context */
struct net_device_context *nd_ctx;
+
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 vf_alloc;
+ /* Serial number of the VF to team with */
+ u32 vf_serial;
};
/* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 23126a74f357..51e4c0fd0a74 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -453,13 +453,16 @@ static int negotiate_nvsp_ver(struct hv_device *device,
if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
return 0;
- /* NVSPv2 only: Send NDIS config */
+ /* NVSPv2 or later: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
+ if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+ init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
@@ -1064,11 +1067,10 @@ static void netvsc_receive(struct netvsc_device *net_device,
static void netvsc_send_table(struct hv_device *hdev,
- struct vmpacket_descriptor *vmpkt)
+ struct nvsp_message *nvmsg)
{
struct netvsc_device *nvscdev;
struct net_device *ndev;
- struct nvsp_message *nvmsg;
int i;
u32 count, *tab;
@@ -1077,12 +1079,6 @@ static void netvsc_send_table(struct hv_device *hdev,
return;
ndev = nvscdev->ndev;
- nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
- (vmpkt->offset8 << 3));
-
- if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
- return;
-
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1096,6 +1092,28 @@ static void netvsc_send_table(struct hv_device *hdev,
nvscdev->send_table[i] = tab[i];
}
+static void netvsc_send_vf(struct netvsc_device *nvdev,
+ struct nvsp_message *nvmsg)
+{
+ nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+ nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+}
+
+static inline void netvsc_receive_inband(struct hv_device *hdev,
+ struct netvsc_device *nvdev,
+ struct nvsp_message *nvmsg)
+{
+ switch (nvmsg->hdr.msg_type) {
+ case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+ netvsc_send_table(hdev, nvmsg);
+ break;
+
+ case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+ netvsc_send_vf(nvdev, nvmsg);
+ break;
+ }
+}
+
void netvsc_channel_cb(void *context)
{
int ret;
@@ -1108,6 +1126,7 @@ void netvsc_channel_cb(void *context)
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
+ struct nvsp_message *nvmsg;
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1126,6 +1145,8 @@ void netvsc_channel_cb(void *context)
if (ret == 0) {
if (bytes_recvd > 0) {
desc = (struct vmpacket_descriptor *)buffer;
+ nvmsg = (struct nvsp_message *)((unsigned long)
+ desc + (desc->offset8 << 3));
switch (desc->type) {
case VM_PKT_COMP:
netvsc_send_completion(net_device,
@@ -1138,7 +1159,9 @@ void netvsc_channel_cb(void *context)
break;
case VM_PKT_DATA_INBAND:
- netvsc_send_table(device, desc);
+ netvsc_receive_inband(device,
+ net_device,
+ nvmsg);
break;
default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 358475ed9b59..f3b9d3eb753b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -106,7 +106,7 @@ static int netvsc_open(struct net_device *net)
return ret;
}
- netif_tx_start_all_queues(net);
+ netif_tx_wake_all_queues(net);
nvdev = hv_get_drvdata(device_obj);
rdev = nvdev->extension;
@@ -120,15 +120,56 @@ static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
int ret;
+ u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+ struct vmbus_channel *chn;
netif_tx_disable(net);
/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
cancel_work_sync(&net_device_ctx->work);
ret = rndis_filter_close(device_obj);
- if (ret != 0)
+ if (ret != 0) {
netdev_err(net, "unable to close device (ret %d).\n", ret);
+ return ret;
+ }
+
+ /* Ensure pending bytes in ring are read */
+ while (true) {
+ aread = 0;
+ for (i = 0; i < nvdev->num_chn; i++) {
+ chn = nvdev->chn_table[i];
+ if (!chn)
+ continue;
+
+ hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
+ &awrite);
+
+ if (aread)
+ break;
+
+ hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
+ &awrite);
+
+ if (aread)
+ break;
+ }
+
+ retry++;
+ if (retry > retry_max || aread == 0)
+ break;
+
+ msleep(msec);
+
+ if (msec < 1000)
+ msec *= 2;
+ }
+
+ if (aread) {
+ netdev_err(net, "Ring buffer not empty after closing rndis\n");
+ ret = -ETIMEDOUT;
+ }
return ret;
}
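The drain loop added to netvsc_close() above polls the ring with an exponentially growing sleep (10 ms, doubling up to 1 s) for a bounded number of retries. A userspace model of the same backoff, with a fake ring that empties over time:

#include <stdio.h>
#include <unistd.h>

static int bytes_left = 500;	/* stand-in for ring occupancy */

static int ring_pending(void)
{
	bytes_left -= 200;	/* pretend the host keeps draining */
	return bytes_left > 0 ? bytes_left : 0;
}

int main(void)
{
	unsigned int msec = 10, retry, retry_max = 20;
	int aread = 0;

	for (retry = 0; retry <= retry_max; retry++) {
		aread = ring_pending();
		if (!aread)
			break;
		usleep(msec * 1000);	/* msleep(msec) */
		if (msec < 1000)
			msec *= 2;	/* exponential backoff, capped */
	}
	if (aread)
		printf("timed out, %d bytes left\n", aread);
	else
		printf("drained after %u retries\n", retry);
	return 0;
}

The kernel loop differs only in that it checks both the inbound and outbound ring of every channel before each sleep.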
@@ -729,6 +770,101 @@ static void netvsc_get_channels(struct net_device *net,
}
}
+static int netvsc_set_channels(struct net_device *net,
+ struct ethtool_channels *channels)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct hv_device *dev = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct netvsc_device_info device_info;
+ u32 num_chn;
+ u32 max_chn;
+ int ret = 0;
+ bool recovering = false;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ /* only dereference nvdev after the NULL check above */
+ num_chn = nvdev->num_chn;
+ max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+
+ if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
+ pr_info("vRSS unsupported before NVSP Version 5\n");
+ return -EINVAL;
+ }
+
+ /* Only combined_count may be set; rx, tx, and other counts are not supported */
+ if (!channels ||
+ channels->rx_count ||
+ channels->tx_count ||
+ channels->other_count ||
+ (channels->combined_count < 1))
+ return -EINVAL;
+
+ if (channels->combined_count > max_chn) {
+ pr_info("combined channels too high, using %d\n", max_chn);
+ channels->combined_count = max_chn;
+ }
+
+ ret = netvsc_close(net);
+ if (ret)
+ goto out;
+
+ do_set:
+ nvdev->start_remove = true;
+ rndis_filter_device_remove(dev);
+
+ nvdev->num_chn = channels->combined_count;
+
+ net_device_ctx->device_ctx = dev;
+ hv_set_drvdata(dev, net);
+
+ memset(&device_info, 0, sizeof(device_info));
+ device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
+ device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
+
+ ret = rndis_filter_device_add(dev, &device_info);
+ if (ret) {
+ if (recovering) {
+ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+ return ret;
+ }
+ goto recover;
+ }
+
+ nvdev = hv_get_drvdata(dev);
+
+ ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
+ if (ret) {
+ if (recovering) {
+ netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
+ return ret;
+ }
+ goto recover;
+ }
+
+ ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
+ if (ret) {
+ if (recovering) {
+ netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
+ return ret;
+ }
+ goto recover;
+ }
+
+ out:
+ netvsc_open(net);
+
+ return ret;
+
+ recover:
+ /* If the above failed, we attempt to recover through the same
+ * process but with the original number of channels.
+ */
+ netdev_err(net, "could not set channels, recovering\n");
+ recovering = true;
+ channels->combined_count = num_chn;
+ goto do_set;
+}
+
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -736,6 +872,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = hv_get_drvdata(hdev);
struct netvsc_device_info device_info;
int limit = ETH_DATA_LEN;
+ int ret = 0;
if (nvdev == NULL || nvdev->destroy)
return -ENODEV;
@@ -743,25 +880,31 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
limit = NETVSC_MTU - ETH_HLEN;
- /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
- if (mtu < ETH_DATA_LEN || mtu > limit)
+ if (mtu < NETVSC_MTU_MIN || mtu > limit)
return -EINVAL;
+ ret = netvsc_close(ndev);
+ if (ret)
+ goto out;
+
nvdev->start_remove = true;
- cancel_work_sync(&ndevctx->work);
- netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
ndev->mtu = mtu;
ndevctx->device_ctx = hdev;
hv_set_drvdata(hdev, ndev);
+
+ memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
+ device_info.num_chn = nvdev->num_chn;
device_info.max_num_vrss_chns = max_num_vrss_chns;
rndis_filter_device_add(hdev, &device_info);
- netif_tx_wake_all_queues(ndev);
- return 0;
+out:
+ netvsc_open(ndev);
+
+ return ret;
}
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
@@ -844,6 +987,7 @@ static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_channels = netvsc_get_channels,
+ .set_channels = netvsc_set_channels,
};
static const struct net_device_ops device_ops = {
@@ -977,6 +1121,7 @@ static int netvsc_probe(struct hv_device *dev,
net->needed_headroom = max_needed_headroom;
/* Notify the netvsc driver of the new device */
+ memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.max_num_vrss_chns = max_num_vrss_chns;
ret = rndis_filter_device_add(dev, &device_info);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 236aeb76ef22..5931a799aa17 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -984,9 +984,16 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
struct netvsc_device *nvscdev;
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
int ret;
+ unsigned long flags;
nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
+ spin_lock_irqsave(&nvscdev->sc_lock, flags);
+ nvscdev->num_sc_offered--;
+ spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+ if (nvscdev->num_sc_offered == 0)
+ complete(&nvscdev->channel_init_wait);
+
if (chn_index >= nvscdev->num_chn)
return;
@@ -1015,8 +1022,10 @@ int rndis_filter_device_add(struct hv_device *dev,
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
u32 num_rss_qs;
+ u32 sc_delta;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
+ unsigned long flags;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -1039,6 +1048,8 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->max_chn = 1;
net_device->num_chn = 1;
+ spin_lock_init(&net_device->sc_lock);
+
net_device->extension = rndis_device;
rndis_device->net_dev = net_device;
@@ -1054,7 +1065,7 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_query_device(rndis_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
- if (ret == 0 && size == sizeof(u32))
+ if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
net_device->ndev->mtu = mtu;
/* Get the mac address */
@@ -1114,7 +1125,15 @@ int rndis_filter_device_add(struct hv_device *dev,
*/
node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
num_possible_rss_qs = cpumask_weight(node_cpu_mask);
- net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+ /* Use the requested channel count when it is set and within range. */
+ if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
+ net_device->num_chn = device_info->num_chn;
+ else
+ net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+ num_rss_qs = net_device->num_chn - 1;
+ net_device->num_sc_offered = num_rss_qs;
if (net_device->num_chn == 1)
goto out;
@@ -1157,11 +1176,25 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+ /*
+ * Wait for the host to send us the sub-channel offers.
+ */
+ spin_lock_irqsave(&net_device->sc_lock, flags);
+ sc_delta = num_rss_qs - (net_device->num_chn - 1);
+ net_device->num_sc_offered -= sc_delta;
+ spin_unlock_irqrestore(&net_device->sc_lock, flags);
+
+ while (net_device->num_sc_offered != 0) {
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ);
+ if (t == 0)
+ WARN(1, "Netvsc: Waiting for sub-channel processing");
+ }
out:
if (ret) {
net_device->max_chn = 1;
net_device->num_chn = 1;
}
+
return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
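The sub-channel accounting added above is a counted completion: each offer callback decrements num_sc_offered under a lock and signals when the count reaches zero, while the opener waits on that event. A userspace model using a mutex and condition variable in place of the kernel spinlock and completion (compile with -pthread):

#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int num_sc_offered;

static void *sc_open(void *arg)
{
	(void)arg;
	usleep(10000);			/* host processes the offer */
	pthread_mutex_lock(&lock);
	if (--num_sc_offered == 0)	/* netvsc_sc_open() */
		pthread_cond_signal(&done);	/* complete() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;

	num_sc_offered = 3;		/* num_chn - 1 sub-channels */
	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, sc_open, NULL);

	pthread_mutex_lock(&lock);
	while (num_sc_offered != 0)	/* wait_for_completion_timeout() */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	printf("all sub-channels opened\n");
	return 0;
}

Checking the predicate under the lock is what prevents a lost wakeup if all offers complete before the opener starts waiting.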
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f7bd9f3ddaac..d0d5bf6cbb68 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -545,7 +545,9 @@ at86rf230_async_state_delay(void *context)
}
/* Default delay is 1us in most cases */
- tim = ktime_set(0, NSEC_PER_USEC);
+ udelay(1);
+ at86rf230_async_state_timer(&ctx->timer);
+ return;
change:
hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index b6fc29579667..613dae559925 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -1151,7 +1151,6 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids);
static struct spi_driver cc2520_driver = {
.driver = {
.name = "cc2520",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(cc2520_of_ids),
},
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 2549760e039f..997724b8e434 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -812,7 +812,6 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
static struct spi_driver mrf24j40_driver = {
.driver = {
.name = "mrf24j40",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.id_table = mrf24j40_ids,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 94570aace241..cc56fac3c3f8 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -38,69 +38,68 @@
#include <net/net_namespace.h>
#define TX_Q_LIMIT 32
-struct ifb_private {
+struct ifb_q_private {
+ struct net_device *dev;
struct tasklet_struct ifb_tasklet;
- int tasklet_pending;
-
- struct u64_stats_sync rsync;
+ int tasklet_pending;
+ int txqnum;
struct sk_buff_head rq;
- u64 rx_packets;
- u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ struct u64_stats_sync rsync;
struct u64_stats_sync tsync;
+ u64 tx_packets;
+ u64 tx_bytes;
struct sk_buff_head tq;
- u64 tx_packets;
- u64 tx_bytes;
-};
+} ____cacheline_aligned_in_smp;
-static int numifbs = 2;
+struct ifb_dev_private {
+ struct ifb_q_private *tx_private;
+};
-static void ri_tasklet(unsigned long dev);
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
-static void ri_tasklet(unsigned long dev)
+static void ifb_ri_tasklet(unsigned long _txp)
{
- struct net_device *_dev = (struct net_device *)dev;
- struct ifb_private *dp = netdev_priv(_dev);
+ struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
struct netdev_queue *txq;
struct sk_buff *skb;
- txq = netdev_get_tx_queue(_dev, 0);
- if ((skb = skb_peek(&dp->tq)) == NULL) {
- if (__netif_tx_trylock(txq)) {
- skb_queue_splice_tail_init(&dp->rq, &dp->tq);
- __netif_tx_unlock(txq);
- } else {
- /* reschedule */
+ txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
+ skb = skb_peek(&txp->tq);
+ if (!skb) {
+ if (!__netif_tx_trylock(txq))
goto resched;
- }
+ skb_queue_splice_tail_init(&txp->rq, &txp->tq);
+ __netif_tx_unlock(txq);
}
- while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
+ while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
u32 from = G_TC_FROM(skb->tc_verd);
skb->tc_verd = 0;
skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
- u64_stats_update_begin(&dp->tsync);
- dp->tx_packets++;
- dp->tx_bytes += skb->len;
- u64_stats_update_end(&dp->tsync);
+ u64_stats_update_begin(&txp->tsync);
+ txp->tx_packets++;
+ txp->tx_bytes += skb->len;
+ u64_stats_update_end(&txp->tsync);
rcu_read_lock();
- skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
+ skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
if (!skb->dev) {
rcu_read_unlock();
dev_kfree_skb(skb);
- _dev->stats.tx_dropped++;
- if (skb_queue_len(&dp->tq) != 0)
+ txp->dev->stats.tx_dropped++;
+ if (skb_queue_len(&txp->tq) != 0)
goto resched;
break;
}
rcu_read_unlock();
- skb->skb_iif = _dev->ifindex;
+ skb->skb_iif = txp->dev->ifindex;
if (from & AT_EGRESS) {
dev_queue_xmit(skb);
@@ -112,10 +111,11 @@ static void ri_tasklet(unsigned long dev)
}
if (__netif_tx_trylock(txq)) {
- if ((skb = skb_peek(&dp->rq)) == NULL) {
- dp->tasklet_pending = 0;
- if (netif_queue_stopped(_dev))
- netif_wake_queue(_dev);
+ skb = skb_peek(&txp->rq);
+ if (!skb) {
+ txp->tasklet_pending = 0;
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
} else {
__netif_tx_unlock(txq);
goto resched;
@@ -123,8 +123,8 @@ static void ri_tasklet(unsigned long dev)
__netif_tx_unlock(txq);
} else {
resched:
- dp->tasklet_pending = 1;
- tasklet_schedule(&dp->ifb_tasklet);
+ txp->tasklet_pending = 1;
+ tasklet_schedule(&txp->ifb_tasklet);
}
}
@@ -132,29 +132,58 @@ resched:
static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ifb_private *dp = netdev_priv(dev);
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp = dp->tx_private;
unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_irq(&dp->rsync);
- stats->rx_packets = dp->rx_packets;
- stats->rx_bytes = dp->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
-
- do {
- start = u64_stats_fetch_begin_irq(&dp->tsync);
-
- stats->tx_packets = dp->tx_packets;
- stats->tx_bytes = dp->tx_bytes;
-
- } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
-
+ u64 packets, bytes;
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+ do {
+ start = u64_stats_fetch_begin_irq(&txp->rsync);
+ packets = txp->rx_packets;
+ bytes = txp->rx_bytes;
+ } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&txp->tsync);
+ packets = txp->tx_packets;
+ bytes = txp->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
return stats;
}
+static int ifb_dev_init(struct net_device *dev)
+{
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp;
+ int i;
+
+ txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
+ if (!txp)
+ return -ENOMEM;
+ dp->tx_private = txp;
+ for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+ txp->txqnum = i;
+ txp->dev = dev;
+ __skb_queue_head_init(&txp->rq);
+ __skb_queue_head_init(&txp->tq);
+ u64_stats_init(&txp->rsync);
+ u64_stats_init(&txp->tsync);
+ tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
+ (unsigned long)txp);
+ netif_tx_start_queue(netdev_get_tx_queue(dev, i));
+ }
+ return 0;
+}
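ifb_dev_init() above allocates one ifb_q_private per TX queue as a single kcalloc'd array, and ifb_xmit() later indexes that array with the skb's queue mapping. A minimal model of the layout (names are simplified stand-ins for the kernel structures):

#include <stdio.h>
#include <stdlib.h>

/* One private struct per TX queue, allocated as one array and
 * selected by the packet's queue mapping.
 */
struct q_priv {
	int txqnum;
	unsigned long rx_packets;
};

int main(void)
{
	int num_tx_queues = 8, queue_mapping = 5, i;
	struct q_priv *txp = calloc(num_tx_queues, sizeof(*txp));

	if (!txp)
		return 1;
	for (i = 0; i < num_tx_queues; i++)
		txp[i].txqnum = i;	/* ifb_dev_init() */

	txp[queue_mapping].rx_packets++;	/* ifb_xmit() picks by mapping */

	printf("queue %d: %lu packet(s)\n", queue_mapping,
	       txp[queue_mapping].rx_packets);
	free(txp);
	return 0;
}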
static const struct net_device_ops ifb_netdev_ops = {
.ndo_open = ifb_open,
@@ -162,6 +191,7 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_get_stats64 = ifb_stats64,
.ndo_start_xmit = ifb_xmit,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_init = ifb_dev_init,
};
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
@@ -169,10 +199,24 @@ static const struct net_device_ops ifb_netdev_ops = {
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
+static void ifb_dev_free(struct net_device *dev)
+{
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp = dp->tx_private;
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+ tasklet_kill(&txp->ifb_tasklet);
+ __skb_queue_purge(&txp->rq);
+ __skb_queue_purge(&txp->tq);
+ }
+ kfree(dp->tx_private);
+ free_netdev(dev);
+}
+
static void ifb_setup(struct net_device *dev)
{
/* Initialize the device structure. */
- dev->destructor = free_netdev;
dev->netdev_ops = &ifb_netdev_ops;
/* Fill in device structure with ethernet-generic values. */
@@ -188,17 +232,19 @@ static void ifb_setup(struct net_device *dev)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
eth_hw_addr_random(dev);
+ dev->destructor = ifb_dev_free;
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ifb_private *dp = netdev_priv(dev);
+ struct ifb_dev_private *dp = netdev_priv(dev);
u32 from = G_TC_FROM(skb->tc_verd);
+ struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
- u64_stats_update_begin(&dp->rsync);
- dp->rx_packets++;
- dp->rx_bytes += skb->len;
- u64_stats_update_end(&dp->rsync);
+ u64_stats_update_begin(&txp->rsync);
+ txp->rx_packets++;
+ txp->rx_bytes += skb->len;
+ u64_stats_update_end(&txp->rsync);
if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
dev_kfree_skb(skb);
@@ -206,14 +252,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
- netif_stop_queue(dev);
- }
+ if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
- __skb_queue_tail(&dp->rq, skb);
- if (!dp->tasklet_pending) {
- dp->tasklet_pending = 1;
- tasklet_schedule(&dp->ifb_tasklet);
+ __skb_queue_tail(&txp->rq, skb);
+ if (!txp->tasklet_pending) {
+ txp->tasklet_pending = 1;
+ tasklet_schedule(&txp->ifb_tasklet);
}
return NETDEV_TX_OK;
@@ -221,24 +266,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
static int ifb_close(struct net_device *dev)
{
- struct ifb_private *dp = netdev_priv(dev);
-
- tasklet_kill(&dp->ifb_tasklet);
- netif_stop_queue(dev);
- __skb_queue_purge(&dp->rq);
- __skb_queue_purge(&dp->tq);
+ netif_tx_stop_all_queues(dev);
return 0;
}
static int ifb_open(struct net_device *dev)
{
- struct ifb_private *dp = netdev_priv(dev);
-
- tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
- __skb_queue_head_init(&dp->rq);
- __skb_queue_head_init(&dp->tq);
- netif_start_queue(dev);
-
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -255,31 +289,30 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
.kind = "ifb",
- .priv_size = sizeof(struct ifb_private),
+ .priv_size = sizeof(struct ifb_dev_private),
.setup = ifb_setup,
.validate = ifb_validate,
};
-/* Number of ifb devices to be set up by this module. */
+/* Number of ifb devices to be set up by this module.
+ * Note that these legacy devices have one queue.
+ * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
+ */
+static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
- struct ifb_private *dp;
int err;
- dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+ dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
NET_NAME_UNKNOWN, ifb_setup);
if (!dev_ifb)
return -ENOMEM;
- dp = netdev_priv(dev_ifb);
- u64_stats_init(&dp->rsync);
- u64_stats_init(&dp->tsync);
-
dev_ifb->rtnl_link_ops = &ifb_link_ops;
err = register_netdevice(dev_ifb);
if (err < 0)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9f59f17dc317..47da43595ac2 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1047,6 +1047,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
#endif
.ndo_get_iflink = macvlan_dev_get_iflink,
+ .ndo_features_check = passthru_features_check,
};
void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cb86d7a01542..c07030dbe748 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -14,6 +14,11 @@ if PHYLIB
comment "MII PHY device drivers"
+config AQUANTIA_PHY
+ tristate "Drivers for the Aquantia PHYs"
+ ---help---
+ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+
config AT803X_PHY
tristate "Drivers for Atheros AT803X PHYs"
---help---
@@ -54,6 +59,11 @@ config VITESSE_PHY
---help---
Currently supports the vsc8244
+config TERANETICS_PHY
+ tristate "Drivers for the Teranetics PHYs"
+ ---help---
+ Currently supports the Teranetics TN2020
+
config SMSC_PHY
tristate "Drivers for SMSC PHYs"
---help---
@@ -145,13 +155,13 @@ config MDIO_GPIO
will be called mdio-gpio.
config MDIO_OCTEON
- tristate "Support for MDIO buses on Octeon SOCs"
- depends on CAVIUM_OCTEON_SOC
- default y
+ tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
+ depends on 64BIT
help
- This module provides a driver for the Octeon MDIO busses.
- It is required by the Octeon Ethernet device drivers.
+ This module provides a driver for the Octeon and ThunderX MDIO
+ busses. It is required by the Octeon and ThunderX ethernet device
+ drivers.
If in doubt, say Y.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fcc25a0c45cd..9bb103358c74 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,12 +3,14 @@
libphy-objs := phy.o phy_device.o mdio_bus.o
obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
+obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
new file mode 100644
index 000000000000..73d347d7cb04
--- /dev/null
+++ b/drivers/net/phy/aquantia.c
@@ -0,0 +1,152 @@
+/*
+ * Driver for Aquantia PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+
+#define PHY_ID_AQ1202 0x03a1b445
+#define PHY_ID_AQ2104 0x03a1b460
+#define PHY_ID_AQR105 0x03a1b4a2
+#define PHY_ID_AQR405 0x03a1b4b0
+
+#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
+ SUPPORTED_1000baseT_Full | \
+ SUPPORTED_100baseT_Full | \
+ PHY_DEFAULT_FEATURES)
+
+static int aquantia_config_aneg(struct phy_device *phydev)
+{
+ phydev->supported = PHY_AQUANTIA_FEATURES;
+ phydev->advertising = phydev->supported;
+
+ return 0;
+}
+
+static int aquantia_aneg_done(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+}
+
+static int aquantia_read_status(struct phy_device *phydev)
+{
+ int reg;
+
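+ /* MDIO_STAT1 link status is latched low; read twice so the second
+ * read returns the current state rather than the latched value.
+ */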
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ phydev->link = 1;
+ else
+ phydev->link = 0;
+
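+ /* the negotiated speed is reported in vendor register 0xc800;
+ * read it twice with a short delay to obtain a settled value.
+ */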
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+ mdelay(10);
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+
+ switch (reg) {
+ case 0x9:
+ phydev->speed = SPEED_2500;
+ break;
+ case 0x5:
+ phydev->speed = SPEED_1000;
+ break;
+ case 0x3:
+ phydev->speed = SPEED_100;
+ break;
+ case 0x7:
+ default:
+ phydev->speed = SPEED_10000;
+ break;
+ }
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static struct phy_driver aquantia_driver[] = {
+{
+ .phy_id = PHY_ID_AQ1202,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQ1202",
+ .features = PHY_AQUANTIA_FEATURES,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+{
+ .phy_id = PHY_ID_AQ2104,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQ2104",
+ .features = PHY_AQUANTIA_FEATURES,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+{
+ .phy_id = PHY_ID_AQR105,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQR105",
+ .features = PHY_AQUANTIA_FEATURES,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+{
+ .phy_id = PHY_ID_AQR405,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQR405",
+ .features = PHY_AQUANTIA_FEATURES,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init aquantia_init(void)
+{
+ return phy_drivers_register(aquantia_driver,
+ ARRAY_SIZE(aquantia_driver));
+}
+
+static void __exit aquantia_exit(void)
+{
+ return phy_drivers_unregister(aquantia_driver,
+ ARRAY_SIZE(aquantia_driver));
+}
+
+module_init(aquantia_init);
+module_exit(aquantia_exit);
+
+static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
+ { PHY_ID_AQ1202, 0xfffffff0 },
+ { PHY_ID_AQ2104, 0xfffffff0 },
+ { PHY_ID_AQR105, 0xfffffff0 },
+ { PHY_ID_AQR405, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
+
+MODULE_DESCRIPTION("Aquantia PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 00cb41e71312..185b03c08e16 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8a3bf5469892..32f10662f4ac 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -123,12 +123,8 @@ static int dp83867_of_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = of_property_read_u32(of_node, "ti,fifo-depth",
+ return of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
- if (ret)
- return ret;
-
- return 0;
}
#else
static int dp83867_of_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 1960b46add65..479b93f9581c 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
u16 lpagb = 0;
u16 lpa = 0;
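+ /* with no link there is nothing to decode; skip straight to
+ * publishing the bare register set.
+ */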
+ if (!fp->status.link)
+ goto done;
+ bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
+
if (fp->status.duplex) {
bmcr |= BMCR_FULLDPLX;
@@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
}
}
- if (fp->status.link)
- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
-
if (fp->status.pause)
lpa |= LPA_PAUSE_CAP;
if (fp->status.asym_pause)
lpa |= LPA_PAUSE_ASYM;
+done:
fp->regs[MII_PHYSID1] = 0;
fp->regs[MII_PHYSID2] = 0;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f721444c2b0a..e6897b6a8a53 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -48,8 +48,11 @@
#define MII_M1011_IMASK_CLEAR 0x0000
#define MII_M1011_PHY_SCR 0x10
+#define MII_M1011_PHY_SCR_MDI 0x0000
+#define MII_M1011_PHY_SCR_MDI_X 0x0020
#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060
+#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16
#define MII_M1145_PHY_EXT_SR 0x1b
#define MII_M1145_PHY_EXT_CR 0x14
#define MII_M1145_RGMII_RX_DELAY 0x0080
@@ -159,6 +162,43 @@ static int marvell_config_intr(struct phy_device *phydev)
return err;
}
+static int marvell_set_polarity(struct phy_device *phydev, int polarity)
+{
+ int reg;
+ int err;
+ int val;
+
+ /* get the current settings */
+ reg = phy_read(phydev, MII_M1011_PHY_SCR);
+ if (reg < 0)
+ return reg;
+
+ val = reg;
+ val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
+ switch (polarity) {
+ case ETH_TP_MDI:
+ val |= MII_M1011_PHY_SCR_MDI;
+ break;
+ case ETH_TP_MDI_X:
+ val |= MII_M1011_PHY_SCR_MDI_X;
+ break;
+ case ETH_TP_MDI_AUTO:
+ case ETH_TP_MDI_INVALID:
+ default:
+ val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+ break;
+ }
+
+ if (val != reg) {
+ /* Set the new polarity value in the register */
+ err = phy_write(phydev, MII_M1011_PHY_SCR, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int marvell_config_aneg(struct phy_device *phydev)
{
int err;
@@ -191,8 +231,7 @@ static int marvell_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_M1011_PHY_SCR,
- MII_M1011_PHY_SCR_AUTO_CROSS);
+ err = marvell_set_polarity(phydev, phydev->mdix);
if (err < 0)
return err;
@@ -514,6 +553,16 @@ static int m88e1111_config_init(struct phy_device *phydev)
err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
if (err < 0)
return err;
+
+ /* make sure copper is selected */
+ err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE,
+ err & (~0xff));
+ if (err < 0)
+ return err;
}
if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index c838ad6155f7..fcf4e4df7cc8 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -7,6 +7,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -14,11 +15,12 @@
#include <linux/phy.h>
#include <linux/io.h>
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#endif
-#define DRV_VERSION "1.0"
-#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define DRV_VERSION "1.1"
+#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
#define SMI_CMD 0x0
#define SMI_WR_DAT 0x8
@@ -26,6 +28,79 @@
#define SMI_CLK 0x18
#define SMI_EN 0x20
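+/* Declare 64-bit register bitfields MSB-first so the fields land at the
+ * same bit positions on both big-endian and little-endian kernels.
+ */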
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more) \
+ field; \
+ more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more) \
+ more \
+ field;
+
+#endif
+
+union cvmx_smix_clk {
+ u64 u64;
+ struct cvmx_smix_clk_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+ OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+ OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+ ;))))))))))
+ } s;
+};
+
+union cvmx_smix_cmd {
+ u64 u64;
+ struct cvmx_smix_cmd_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+ ;))))))
+ } s;
+};
+
+union cvmx_smix_en {
+ u64 u64;
+ struct cvmx_smix_en_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+ OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+ ;))
+ } s;
+};
+
+union cvmx_smix_rd_dat {
+ u64 u64;
+ struct cvmx_smix_rd_dat_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+ ;))))
+ } s;
+};
+
+union cvmx_smix_wr_dat {
+ u64 u64;
+ struct cvmx_smix_wr_dat_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+ ;))))
+ } s;
+};
+
enum octeon_mdiobus_mode {
UNINIT = 0,
C22,
@@ -41,6 +116,21 @@ struct octeon_mdiobus {
int phy_irq[PHY_MAX_ADDR];
};
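+/* Octeon reaches the SMI/MDIO block through CSR accessors, while ThunderX
+ * maps it as ordinary MMIO where relaxed 64-bit accessors suffice.
+ */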
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+static void oct_mdio_writeq(u64 val, u64 addr)
+{
+ cvmx_write_csr(addr, val);
+}
+
+static u64 oct_mdio_readq(u64 addr)
+{
+ return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr) writeq_relaxed(val, (void *)addr)
+#define oct_mdio_readq(addr) readq_relaxed((void *)addr)
+#endif
+
static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
enum octeon_mdiobus_mode m)
{
@@ -49,10 +139,10 @@ static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
if (m == p->mode)
return;
- smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
+ smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
smi_clk.s.mode = (m == C45) ? 1 : 0;
smi_clk.s.preamble = 1;
- cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
+ oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
p->mode = m;
}
@@ -67,7 +157,7 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
smi_wr.u64 = 0;
smi_wr.s.dat = regnum & 0xffff;
- cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
regnum = (regnum >> 16) & 0x1f;
@@ -75,14 +165,14 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
/* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
__delay(1000);
- smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
@@ -114,14 +204,14 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
smi_cmd.s.phy_op = op;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
/* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
__delay(1000);
- smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
+ smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
} while (smi_rd.s.pending && --timeout);
if (smi_rd.s.val)
@@ -153,20 +243,20 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
smi_wr.u64 = 0;
smi_wr.s.dat = val;
- cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = op;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
/* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
__delay(1000);
- smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
@@ -187,30 +277,34 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
return -ENOMEM;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
if (res_mem == NULL) {
dev_err(&pdev->dev, "found no memory resource\n");
- err = -ENXIO;
- goto fail;
+ return -ENXIO;
}
+
bus->mdio_phys = res_mem->start;
bus->regsize = resource_size(res_mem);
+
if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
res_mem->name)) {
dev_err(&pdev->dev, "request_mem_region failed\n");
- goto fail;
+ return -ENXIO;
}
+
bus->register_base =
(u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+ if (!bus->register_base) {
+ dev_err(&pdev->dev, "devm_ioremap failed\n");
+ return -ENOMEM;
+ }
bus->mii_bus = mdiobus_alloc();
-
if (!bus->mii_bus)
goto fail;
smi_en.u64 = 0;
smi_en.s.en = 1;
- cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+ oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
bus->mii_bus->priv = bus;
bus->mii_bus->irq = bus->phy_irq;
@@ -234,7 +328,7 @@ fail_register:
mdiobus_free(bus->mii_bus);
fail:
smi_en.u64 = 0;
- cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+ oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return err;
}
@@ -248,7 +342,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
mdiobus_unregister(bus->mii_bus);
mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
- cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+ oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return 0;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..84b1fba58ac3 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -353,6 +353,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
phydev->duplex = cmd->duplex;
+ phydev->mdix = cmd->eth_tp_mdix_ctrl;
+
/* Restart the PHY */
phy_start_aneg(phydev);
@@ -377,6 +379,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
cmd->transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->autoneg = phydev->autoneg;
+ cmd->eth_tp_mdix_ctrl = phydev->mdix;
return 0;
}
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 45353613b2ed..43ab691362d4 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -137,6 +137,19 @@ static struct phy_driver realtek_drvs[] = {
.config_intr = &rtl8211b_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = 0x001cc914,
+ .name = "RTL8211DN Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = rtl821x_ack_interrupt,
+ .config_intr = rtl8211e_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+ }, {
.phy_id = 0x001cc915,
.name = "RTL8211E Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
@@ -170,6 +183,7 @@ module_phy_driver(realtek_drvs);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc912, 0x001fffff },
+ { 0x001cc914, 0x001fffff },
{ 0x001cc915, 0x001fffff },
{ 0x001cc916, 0x001fffff },
{ }
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 46530159256b..f091d691cf6f 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -209,8 +209,6 @@ static int ks8995_reset(struct ks8995_switch *ks)
return ks8995_start(ks);
}
-/* ------------------------------------------------------------------------ */
-
static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
@@ -220,19 +218,9 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
ks8995 = dev_get_drvdata(dev);
- if (unlikely(off > ks8995->regs_attr.size))
- return 0;
-
- if ((off + count) > ks8995->regs_attr.size)
- count = ks8995->regs_attr.size - off;
-
- if (unlikely(!count))
- return count;
-
return ks8995_read(ks8995, buf, off, count);
}
-
static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
@@ -242,19 +230,9 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
ks8995 = dev_get_drvdata(dev);
- if (unlikely(off >= ks8995->regs_attr.size))
- return -EFBIG;
-
- if ((off + count) > ks8995->regs_attr.size)
- count = ks8995->regs_attr.size - off;
-
- if (unlikely(!count))
- return count;
-
return ks8995_write(ks8995, buf, off, count);
}
-
static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
new file mode 100644
index 000000000000..91e1bec6079f
--- /dev/null
+++ b/drivers/net/phy/teranetics.c
@@ -0,0 +1,135 @@
+/*
+ * Driver for Teranetics PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+MODULE_DESCRIPTION("Teranetics PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
+
+#define PHY_ID_TN2020 0x00a19410
+#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \
+ MDIO_PHYXS_LNSTAT_SYNC1 | \
+ MDIO_PHYXS_LNSTAT_SYNC2 | \
+ MDIO_PHYXS_LNSTAT_SYNC3 | \
+ MDIO_PHYXS_LNSTAT_ALIGN)
+
+static int teranetics_config_init(struct phy_device *phydev)
+{
+ phydev->supported = SUPPORTED_10000baseT_Full;
+ phydev->advertising = SUPPORTED_10000baseT_Full;
+
+ return 0;
+}
+
+static int teranetics_soft_reset(struct phy_device *phydev)
+{
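+ /* intentionally empty: avoids the generic BMCR soft reset, which this
+ * C45 PHY does not support (assumed from the stub implementation).
+ */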
+ return 0;
+}
+
+static int teranetics_aneg_done(struct phy_device *phydev)
+{
+ int reg;
+
+ /* The autonegotiation state can only be checked on the copper
+ * port; on the fiber port, simply report that it has completed.
+ */
+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+ }
+
+ return 1;
+}
+
+static int teranetics_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int teranetics_read_status(struct phy_device *phydev)
+{
+ int reg;
+
+ phydev->link = 1;
+
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
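+ /* vendor register 93 reads zero when the copper port is in use;
+ * only then do lane sync and AN link status reflect reality.
+ */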
+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT);
+ if (reg < 0 ||
+ !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+ phydev->link = 0;
+ }
+
+ return 0;
+}
+
+static int teranetics_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
+}
+
+static struct phy_driver teranetics_driver[] = {
+{
+ .phy_id = PHY_ID_TN2020,
+ .phy_id_mask = 0xffffffff,
+ .name = "Teranetics TN2020",
+ .soft_reset = teranetics_soft_reset,
+ .aneg_done = teranetics_aneg_done,
+ .config_init = teranetics_config_init,
+ .config_aneg = teranetics_config_aneg,
+ .read_status = teranetics_read_status,
+ .match_phy_device = teranetics_match_phy_device,
+ .driver = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init teranetics_init(void)
+{
+ return phy_drivers_register(teranetics_driver,
+ ARRAY_SIZE(teranetics_driver));
+}
+
+static void __exit teranetics_exit(void)
+{
+ return phy_drivers_unregister(teranetics_driver,
+ ARRAY_SIZE(teranetics_driver));
+}
+
+module_init(teranetics_init);
+module_exit(teranetics_exit);
+
+static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+ { PHY_ID_TN2020, 0xffffffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, teranetics_tbl);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 06a039414628..976aa9704297 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -961,6 +961,7 @@ static const struct net_device_ops tap_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
+ .ndo_features_check = passthru_features_check,
};
static void tun_flow_init(struct tun_struct *tun)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7ba8d0885f12..1610b79ae386 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -106,6 +106,16 @@ config USB_RTL8152
To compile this driver as a module, choose M here: the
module will be called r8152.
+config USB_LAN78XX
+ tristate "Microchip LAN78XX Based USB Ethernet Adapters"
+ select MII
+ help
+ This option adds support for Microchip LAN78XX based USB 2
+ & USB 3 10/100/1000 Ethernet adapters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lan78xx.
+
config USB_USBNET
tristate "Multi-purpose USB Networking Framework"
select MII
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e2797f1e1b31..cf6a0e610a7f 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o
obj-$(CONFIG_USB_RTL8150) += rtl8150.o
obj-$(CONFIG_USB_RTL8152) += r8152.o
obj-$(CONFIG_USB_HSO) += hso.o
+obj-$(CONFIG_USB_LAN78XX) += lan78xx.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
new file mode 100644
index 000000000000..39364a45af40
--- /dev/null
+++ b/drivers/net/usb/lan78xx.c
@@ -0,0 +1,3495 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <net/ip6_checksum.h>
+#include "lan78xx.h"
+
+#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
+#define DRIVER_NAME "lan78xx"
+#define DRIVER_VERSION "1.0.0"
+
+#define TX_TIMEOUT_JIFFIES (5 * HZ)
+#define THROTTLE_JIFFIES (HZ / 8)
+#define UNLINK_TIMEOUT_MS 3
+
+#define RX_MAX_QUEUE_MEMORY (60 * 1518)
+
+#define SS_USB_PKT_SIZE (1024)
+#define HS_USB_PKT_SIZE (512)
+#define FS_USB_PKT_SIZE (64)
+
+#define MAX_RX_FIFO_SIZE (12 * 1024)
+#define MAX_TX_FIFO_SIZE (12 * 1024)
+#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
+#define DEFAULT_BULK_IN_DELAY (0x0800)
+#define MAX_SINGLE_PACKET_SIZE (9000)
+#define DEFAULT_TX_CSUM_ENABLE (true)
+#define DEFAULT_RX_CSUM_ENABLE (true)
+#define DEFAULT_TSO_CSUM_ENABLE (true)
+#define DEFAULT_VLAN_FILTER_ENABLE (true)
+#define INTERNAL_PHY_ID (2) /* 2: GMII */
+#define TX_OVERHEAD (8)
+#define RXW_PADDING 2
+
+#define LAN78XX_USB_VENDOR_ID (0x0424)
+#define LAN7800_USB_PRODUCT_ID (0x7800)
+#define LAN7850_USB_PRODUCT_ID (0x7850)
+#define LAN78XX_EEPROM_MAGIC (0x78A5)
+#define LAN78XX_OTP_MAGIC (0x78F3)
+
+#define MII_READ 1
+#define MII_WRITE 0
+
+#define EEPROM_INDICATOR (0xA5)
+#define EEPROM_MAC_OFFSET (0x01)
+#define MAX_EEPROM_SIZE 512
+#define OTP_INDICATOR_1 (0xF3)
+#define OTP_INDICATOR_2 (0xF7)
+
+#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
+ WAKE_MCAST | WAKE_BCAST | \
+ WAKE_ARP | WAKE_MAGIC)
+
+/* USB related defines */
+#define BULK_IN_PIPE 1
+#define BULK_OUT_PIPE 2
+
+/* default autosuspend delay (mSec)*/
+#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
+
+static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
+ "RX FCS Errors",
+ "RX Alignment Errors",
+ "Rx Fragment Errors",
+ "RX Jabber Errors",
+ "RX Undersize Frame Errors",
+ "RX Oversize Frame Errors",
+ "RX Dropped Frames",
+ "RX Unicast Byte Count",
+ "RX Broadcast Byte Count",
+ "RX Multicast Byte Count",
+ "RX Unicast Frames",
+ "RX Broadcast Frames",
+ "RX Multicast Frames",
+ "RX Pause Frames",
+ "RX 64 Byte Frames",
+ "RX 65 - 127 Byte Frames",
+ "RX 128 - 255 Byte Frames",
+ "RX 256 - 511 Bytes Frames",
+ "RX 512 - 1023 Byte Frames",
+ "RX 1024 - 1518 Byte Frames",
+ "RX Greater 1518 Byte Frames",
+ "EEE RX LPI Transitions",
+ "EEE RX LPI Time",
+ "TX FCS Errors",
+ "TX Excess Deferral Errors",
+ "TX Carrier Errors",
+ "TX Bad Byte Count",
+ "TX Single Collisions",
+ "TX Multiple Collisions",
+ "TX Excessive Collision",
+ "TX Late Collisions",
+ "TX Unicast Byte Count",
+ "TX Broadcast Byte Count",
+ "TX Multicast Byte Count",
+ "TX Unicast Frames",
+ "TX Broadcast Frames",
+ "TX Multicast Frames",
+ "TX Pause Frames",
+ "TX 64 Byte Frames",
+ "TX 65 - 127 Byte Frames",
+ "TX 128 - 255 Byte Frames",
+ "TX 256 - 511 Bytes Frames",
+ "TX 512 - 1023 Byte Frames",
+ "TX 1024 - 1518 Byte Frames",
+ "TX Greater 1518 Byte Frames",
+ "EEE TX LPI Transitions",
+ "EEE TX LPI Time",
+};
+
+struct lan78xx_statstage {
+ u32 rx_fcs_errors;
+ u32 rx_alignment_errors;
+ u32 rx_fragment_errors;
+ u32 rx_jabber_errors;
+ u32 rx_undersize_frame_errors;
+ u32 rx_oversize_frame_errors;
+ u32 rx_dropped_frames;
+ u32 rx_unicast_byte_count;
+ u32 rx_broadcast_byte_count;
+ u32 rx_multicast_byte_count;
+ u32 rx_unicast_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_64_byte_frames;
+ u32 rx_65_127_byte_frames;
+ u32 rx_128_255_byte_frames;
+ u32 rx_256_511_bytes_frames;
+ u32 rx_512_1023_byte_frames;
+ u32 rx_1024_1518_byte_frames;
+ u32 rx_greater_1518_byte_frames;
+ u32 eee_rx_lpi_transitions;
+ u32 eee_rx_lpi_time;
+ u32 tx_fcs_errors;
+ u32 tx_excess_deferral_errors;
+ u32 tx_carrier_errors;
+ u32 tx_bad_byte_count;
+ u32 tx_single_collisions;
+ u32 tx_multiple_collisions;
+ u32 tx_excessive_collision;
+ u32 tx_late_collisions;
+ u32 tx_unicast_byte_count;
+ u32 tx_broadcast_byte_count;
+ u32 tx_multicast_byte_count;
+ u32 tx_unicast_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_64_byte_frames;
+ u32 tx_65_127_byte_frames;
+ u32 tx_128_255_byte_frames;
+ u32 tx_256_511_bytes_frames;
+ u32 tx_512_1023_byte_frames;
+ u32 tx_1024_1518_byte_frames;
+ u32 tx_greater_1518_byte_frames;
+ u32 eee_tx_lpi_transitions;
+ u32 eee_tx_lpi_time;
+};
+
+struct lan78xx_net;
+
+struct lan78xx_priv {
+ struct lan78xx_net *dev;
+ u32 rfe_ctl;
+ u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
+ u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
+ u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
+ struct mutex dataport_mutex; /* for dataport access */
+ spinlock_t rfe_ctl_lock; /* for rfe register access */
+ struct work_struct set_multicast;
+ struct work_struct set_vlan;
+ u32 wol;
+};
+
+enum skb_state {
+ illegal = 0,
+ tx_start,
+ tx_done,
+ rx_start,
+ rx_done,
+ rx_cleanup,
+ unlink_start
+};
+
+struct skb_data { /* skb->cb is one of these */
+ struct urb *urb;
+ struct lan78xx_net *dev;
+ enum skb_state state;
+ size_t length;
+};
+
+struct usb_context {
+ struct usb_ctrlrequest req;
+ struct lan78xx_net *dev;
+};
+
+#define EVENT_TX_HALT 0
+#define EVENT_RX_HALT 1
+#define EVENT_RX_MEMORY 2
+#define EVENT_STS_SPLIT 3
+#define EVENT_LINK_RESET 4
+#define EVENT_RX_PAUSED 5
+#define EVENT_DEV_WAKING 6
+#define EVENT_DEV_ASLEEP 7
+#define EVENT_DEV_OPEN 8
+
+struct lan78xx_net {
+ struct net_device *net;
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ void *driver_priv;
+
+ int rx_qlen;
+ int tx_qlen;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct sk_buff_head done;
+ struct sk_buff_head rxq_pause;
+ struct sk_buff_head txq_pend;
+
+ struct tasklet_struct bh;
+ struct delayed_work wq;
+
+ struct usb_host_endpoint *ep_blkin;
+ struct usb_host_endpoint *ep_blkout;
+ struct usb_host_endpoint *ep_intr;
+
+ int msg_enable;
+
+ struct urb *urb_intr;
+ struct usb_anchor deferred;
+
+ struct mutex phy_mutex; /* for phy access */
+ unsigned pipe_in, pipe_out, pipe_intr;
+
+ u32 hard_mtu; /* count any extra framing */
+ size_t rx_urb_size; /* size for rx urbs */
+
+ unsigned long flags;
+
+ wait_queue_head_t *wait;
+ unsigned char suspend_count;
+
+ unsigned maxpacket;
+ struct timer_list delay;
+
+ unsigned long data[5];
+ struct mii_if_info mii;
+
+ int link_on;
+ u8 mdix_ctrl;
+};
+
+/* use ethtool to change the level for any given device */
+static int msg_level = -1;
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
+
+static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
+{
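+ /* kmalloc'd bounce buffer: USB control transfers must not use
+ * stack memory.
+ */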
+ u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ int ret;
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_READ_REGISTER,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (likely(ret >= 0)) {
+ le32_to_cpus(buf);
+ *data = *buf;
+ } else {
+ netdev_warn(dev->net,
+ "Failed to read register index 0x%08x. ret = %d",
+ index, ret);
+ }
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
+{
+ u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ int ret;
+
+ if (!buf)
+ return -ENOMEM;
+
+ *buf = data;
+ cpu_to_le32s(buf);
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_WRITE_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+ if (unlikely(ret < 0)) {
+ netdev_warn(dev->net,
+ "Failed to write register index 0x%08x. ret = %d",
+ index, ret);
+ }
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int lan78xx_read_stats(struct lan78xx_net *dev,
+ struct lan78xx_statstage *data)
+{
+ int ret = 0;
+ int i;
+ struct lan78xx_statstage *stats;
+ u32 *src;
+ u32 *dst;
+
+ stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_GET_STATS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ (void *)stats,
+ sizeof(*stats),
+ USB_CTRL_SET_TIMEOUT);
+ if (likely(ret >= 0)) {
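+ /* counters arrive little-endian; convert each 32-bit word
+ * to host order while copying.
+ */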
+ src = (u32 *)stats;
+ dst = (u32 *)data;
+ for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
+ le32_to_cpus(&src[i]);
+ dst[i] = src[i];
+ }
+ } else {
+ netdev_warn(dev->net,
+ "Failed to read stat ret = 0x%x", ret);
+ }
+
+ kfree(stats);
+
+ return ret;
+}
+
+/* Loop until the read completes or times out. Called with phy_mutex held. */
+static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = lan78xx_read_reg(dev, MII_ACC, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!(val & MII_ACC_MII_BUSY_))
+ return 0;
+ } while (!time_after(jiffies, start_time + HZ));
+
+ return -EIO;
+}
+
+static inline u32 mii_access(int id, int index, int read)
+{
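+ /* build an MII_ACC command word: PHY address, register index,
+ * direction, plus the busy bit that starts the transaction.
+ */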
+ u32 ret;
+
+ ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
+ ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
+ if (read)
+ ret |= MII_ACC_MII_READ_;
+ else
+ ret |= MII_ACC_MII_WRITE_;
+ ret |= MII_ACC_MII_BUSY_;
+
+ return ret;
+}
+
+static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* set the address, index & direction (read from PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ idx &= dev->mii.reg_num_mask;
+ addr = mii_access(phy_id, idx, MII_READ);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+ ret = (int)(val & 0xFFFF);
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+
+static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
+ int idx, int regval)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ val = regval;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ /* set the address, index & direction (write to PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ idx &= dev->mii.reg_num_mask;
+ addr = mii_access(phy_id, idx, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
+ int mmddev, int mmdidx, int regval)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ mmddev &= 0x1F;
+
+ /* set up device address for MMD */
+ ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register of MMD */
+ val = mmdidx;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register data for MMD */
+ val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* write to MMD */
+ val = regval;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
+ int mmddev, int mmdidx)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* set up device address for MMD */
+ ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register of MMD */
+ val = mmdidx;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register data for MMD */
+ val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* set the address, index & direction (read from PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* read from MMD */
+ ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+ ret = (int)(val & 0xFFFF);
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+
+static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!(val & E2P_CMD_EPC_BUSY_) ||
+ (val & E2P_CMD_EPC_TIMEOUT_))
+ break;
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+ netdev_warn(dev->net, "EEPROM read operation timeout");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!(val & E2P_CMD_EPC_BUSY_))
+ return 0;
+
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ netdev_warn(dev->net, "EEPROM is busy");
+ return -EIO;
+}
+
+static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u32 val;
+ int i, ret;
+
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ ret = lan78xx_write_reg(dev, E2P_CMD, val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ ret = lan78xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, E2P_DATA, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u8 sig;
+ int ret;
+
+ ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+ if ((ret == 0) && (sig == EEPROM_INDICATOR))
+ ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u32 val;
+ int i, ret;
+
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret)
+ return ret;
+
+ /* Issue write/erase enable command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+ ret = lan78xx_write_reg(dev, E2P_CMD, val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ ret = lan78xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ /* Fill data register */
+ val = data[i];
+ ret = lan78xx_write_reg(dev, E2P_DATA, val);
+ if (ret < 0)
+ return ret;
+
+ /* Send "write" command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ ret = lan78xx_write_reg(dev, E2P_CMD, val);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ int i;
+ int ret;
+ u32 buf;
+ unsigned long timeout;
+
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+ if (buf & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+ timeout = jiffies + HZ;
+ do {
+ usleep_range(1, 10);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net,
+ "timeout on OTP_PWR_DN");
+ return -EIO;
+ }
+ } while (buf & OTP_PWR_DN_PWRDN_N_);
+ }
+
+ for (i = 0; i < length; i++) {
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+
+ ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net,
+ "timeout on OTP_STATUS");
+ return -EIO;
+ }
+ } while (buf & OTP_STATUS_BUSY_);
+
+ ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+
+ data[i] = (u8)(buf & 0xFF);
+ }
+
+ return 0;
+}
+
+static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u8 sig;
+ int ret;
+
+ ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
+
+ if (ret == 0) {
+ if (sig == OTP_INDICATOR_2)
+ offset += 0x100;
+ else if (sig != OTP_INDICATOR_1)
+ return -EINVAL; /* unrecognized signature */
+
+ ret = lan78xx_read_raw_otp(dev, offset, length, data);
+ }
+
+ return ret;
+}
+
+static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < 100; i++) {
+ u32 dp_sel;
+
+ ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (dp_sel & DP_SEL_DPRDY_)
+ return 0;
+
+ usleep_range(40, 100);
+ }
+
+ netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+
+ return -EIO;
+}
+
+static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
+ u32 addr, u32 length, u32 *buf)
+{
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u32 dp_sel;
+ int i, ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return 0;
+
+ mutex_lock(&pdata->dataport_mutex);
+
+ ret = lan78xx_dataport_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+
+ dp_sel &= ~DP_SEL_RSEL_MASK_;
+ dp_sel |= ram_select;
+ ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+
+ for (i = 0; i < length; i++) {
+ ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+
+ ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+
+ ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+
+ ret = lan78xx_dataport_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ mutex_unlock(&pdata->dataport_mutex);
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
+ int index, u8 addr[ETH_ALEN])
+{
+ u32 temp;
+
+ if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
+ temp = addr[3];
+ temp = addr[2] | (temp << 8);
+ temp = addr[1] | (temp << 8);
+ temp = addr[0] | (temp << 8);
+ pdata->pfilter_table[index][1] = temp;
+ temp = addr[5];
+ temp = addr[4] | (temp << 8);
+ temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
+ pdata->pfilter_table[index][0] = temp;
+ }
+}
+
+/* returns hash bit number for given MAC address */
+static inline u32 lan78xx_hash(char addr[ETH_ALEN])
+{
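+ /* top nine bits of the Ethernet CRC select one of the 512
+ * hash-table bits.
+ */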
+ return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
+}
+
+static void lan78xx_deferred_multicast_write(struct work_struct *param)
+{
+ struct lan78xx_priv *pdata =
+ container_of(param, struct lan78xx_priv, set_multicast);
+ struct lan78xx_net *dev = pdata->dev;
+ int i;
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+ pdata->rfe_ctl);
+
+ lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+
+ for (i = 1; i < NUM_OF_MAF; i++) {
+ ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+ ret = lan78xx_write_reg(dev, MAF_LO(i),
+ pdata->pfilter_table[i][1]);
+ ret = lan78xx_write_reg(dev, MAF_HI(i),
+ pdata->pfilter_table[i][0]);
+ }
+
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+}
+
+static void lan78xx_set_multicast(struct net_device *netdev)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+ pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
+ RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
+
+ for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
+ pdata->mchash_table[i] = 0;
+ /* pfilter_table[0] holds our own HW address */
+ for (i = 1; i < NUM_OF_MAF; i++) {
+ pdata->pfilter_table[i][0] =
+ pdata->pfilter_table[i][1] = 0;
+ }
+
+ pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
+
+ if (dev->net->flags & IFF_PROMISC) {
+ netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+ pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
+ } else {
+ if (dev->net->flags & IFF_ALLMULTI) {
+ netif_dbg(dev, drv, dev->net,
+ "receive all multicast enabled");
+ pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
+ }
+ }
+
+ if (netdev_mc_count(dev->net)) {
+ struct netdev_hw_addr *ha;
+ int i;
+
+ netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+
+ pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
+
+ i = 1;
+ netdev_for_each_mc_addr(ha, netdev) {
+ /* set first 32 into Perfect Filter */
+ if (i < 33) {
+ lan78xx_set_addr_filter(pdata, i, ha->addr);
+ } else {
+ u32 bitnum = lan78xx_hash(ha->addr);
+
+ pdata->mchash_table[bitnum / 32] |=
+ (1 << (bitnum % 32));
+ pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
+ }
+ i++;
+ }
+ }
+
+ spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+ /* defer register writes to a sleepable context */
+ schedule_work(&pdata->set_multicast);
+}
+
+static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
+ u16 lcladv, u16 rmtadv)
+{
+ u32 flow = 0, fct_flow = 0;
+ int ret;
+
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_TX)
+ flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+
+ if (cap & FLOW_CTRL_RX)
+ flow |= FLOW_CR_RX_FCEN_;
+
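+ /* FCT flow-control thresholds depend on the USB link speed
+ * (SuperSpeed vs HighSpeed values below).
+ */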
+ if (dev->udev->speed == USB_SPEED_SUPER)
+ fct_flow = 0x817;
+ else if (dev->udev->speed == USB_SPEED_HIGH)
+ fct_flow = 0x211;
+
+ netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
+ (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+ (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+
+ ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+
+ /* threshold value should be set before enabling flow */
+ ret = lan78xx_write_reg(dev, FLOW, flow);
+
+ return 0;
+}
+
+static int lan78xx_link_reset(struct lan78xx_net *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ u16 ladv, radv;
+ int ret;
+ u32 buf;
+
+ /* clear PHY interrupt status */
+ /* VTSE PHY */
+ ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ /* clear LAN78xx interrupt status */
+ ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!mii_link_ok(mii) && dev->link_on) {
+ dev->link_on = false;
+ netif_carrier_off(dev->net);
+
+ /* reset MAC */
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ if (unlikely(ret < 0))
+ return -EIO;
+ buf |= MAC_CR_RST_;
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+ if (unlikely(ret < 0))
+ return -EIO;
+ } else if (mii_link_ok(mii) && !dev->link_on) {
+ dev->link_on = true;
+
+ mii_check_media(mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+
+ mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+
+ if (dev->udev->speed == USB_SPEED_SUPER) {
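+ /* adjust the USB 3.0 U1/U2 low-power states to the
+ * negotiated Ethernet speed.
+ */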
+ if (ethtool_cmd_speed(&ecmd) == 1000) {
+ /* disable U2 */
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
+ ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ /* enable U1 */
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ buf |= USB_CFG1_DEV_U1_INIT_EN_;
+ ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ } else {
+ /* enable U1 & U2 */
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ buf |= USB_CFG1_DEV_U2_INIT_EN_;
+ buf |= USB_CFG1_DEV_U1_INIT_EN_;
+ ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ }
+ }
+
+ ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+ if (unlikely(ladv < 0))
+ return -EIO;
+
+ radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+ if (unlikely(radv < 0))
+ return -EIO;
+
+ netif_dbg(dev, link, dev->net,
+ "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
+ ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
+
+ ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
+ netif_carrier_on(dev->net);
+ }
+
+ return ret;
+}
+
+/* some work can't be done in tasklets, so we use keventd
+ *
+ * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
+ * but tasklet_schedule() doesn't. Hope the failure is rare.
+ */
+void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
+{
+ set_bit(work, &dev->flags);
+ if (!schedule_delayed_work(&dev->wq, 0))
+ netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+}
+
+static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
+{
+ u32 intdata;
+
+ if (urb->actual_length != 4) {
+ netdev_warn(dev->net,
+ "unexpected urb length %d", urb->actual_length);
+ return;
+ }
+
+ memcpy(&intdata, urb->transfer_buffer, 4);
+ le32_to_cpus(&intdata);
+
+ if (intdata & INT_ENP_PHY_INT) {
+ netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
+ lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+ } else
+ netdev_warn(dev->net,
+ "unexpected interrupt: 0x%08x\n", intdata);
+}
+
+static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+ return MAX_EEPROM_SIZE;
+}
+
+static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ ee->magic = LAN78XX_EEPROM_MAGIC;
+
+ return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ /* Allow entire eeprom update only */
+ if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
+ (ee->offset == 0) &&
+ (ee->len == 512) &&
+ (data[0] == EEPROM_INDICATOR))
+ return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+ else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
+ (ee->offset == 0) &&
+ (ee->len == 512) &&
+ (data[0] == OTP_INDICATOR_1))
+ return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+
+ return -EINVAL;
+}
+
+static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+}
+
+static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(lan78xx_gstrings);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void lan78xx_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_statstage lan78xx_stat;
+ u32 *p;
+ int i;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
+ p = (u32 *)&lan78xx_stat;
+ for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
+ data[i] = p[i];
+ }
+
+ usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ int ret;
+ u32 buf;
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ if (unlikely(ret < 0)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ if (buf & USB_CFG_RMT_WKP_) {
+ wol->supported = WAKE_ALL;
+ wol->wolopts = pdata->wol;
+ } else {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ }
+ }
+
+ usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ pdata->wol = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ pdata->wol |= WAKE_UCAST;
+ if (wol->wolopts & WAKE_MCAST)
+ pdata->wol |= WAKE_MCAST;
+ if (wol->wolopts & WAKE_BCAST)
+ pdata->wol |= WAKE_BCAST;
+ if (wol->wolopts & WAKE_MAGIC)
+ pdata->wol |= WAKE_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ pdata->wol |= WAKE_PHY;
+ if (wol->wolopts & WAKE_ARP)
+ pdata->wol |= WAKE_ARP;
+
+ device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+ u32 buf;
+ u32 adv, lpadv;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ if (buf & MAC_CR_EEE_EN_) {
+ buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
+ adv = mmd_eee_adv_to_ethtool_adv_t(buf);
+ buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+ lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+ edata->eee_enabled = true;
+ edata->supported = true;
+ edata->eee_active = !!(adv & lpadv);
+ edata->advertised = adv;
+ edata->lp_advertised = lpadv;
+ edata->tx_lpi_enabled = true;
+ /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
+ ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
+ edata->tx_lpi_timer = buf;
+ } else {
+ buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+ lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->supported = false;
+ edata->advertised = 0;
+ edata->lp_advertised = lpadv;
+ edata->tx_lpi_enabled = false;
+ edata->tx_lpi_timer = 0;
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+ u32 buf;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ if (edata->eee_enabled) {
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ buf |= MAC_CR_EEE_EN_;
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+ buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ lan78xx_mmd_write(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+ } else {
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ buf &= ~MAC_CR_EEE_EN_;
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
+static u32 lan78xx_get_link(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ return mii_link_ok(&dev->mii);
+}
+
+int lan78xx_nway_reset(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+ return -EOPNOTSUPP;
+
+ return mii_nway_restart(&dev->mii);
+}
+
+static void lan78xx_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 lan78xx_get_msglevel(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ return dev->msg_enable;
+}
+
+static void lan78xx_set_msglevel(struct net_device *net, u32 level)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ dev->msg_enable = level;
+}
+
+static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct mii_if_info *mii = &dev->mii;
+ int ret;
+ int buf;
+
+ if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+ return -EOPNOTSUPP;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ ret = mii_ethtool_gset(&dev->mii, cmd);
+
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+ buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+
+ buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
+ cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct mii_if_info *mii = &dev->mii;
+ int ret = 0;
+ int temp;
+
+ if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+ return -EOPNOTSUPP;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
+ if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_MDI_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_0);
+ } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_MDI_X_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_0);
+ } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_0);
+ }
+ }
+
+ /* change speed & duplex */
+ ret = mii_ethtool_sset(&dev->mii, cmd);
+
+ if (!cmd->autoneg) {
+ /* force link down */
+ temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
+ temp | BMCR_LOOPBACK);
+ mdelay(1);
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+static const struct ethtool_ops lan78xx_ethtool_ops = {
+ .get_link = lan78xx_get_link,
+ .nway_reset = lan78xx_nway_reset,
+ .get_drvinfo = lan78xx_get_drvinfo,
+ .get_msglevel = lan78xx_get_msglevel,
+ .set_msglevel = lan78xx_set_msglevel,
+ .get_settings = lan78xx_get_settings,
+ .set_settings = lan78xx_set_settings,
+ .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
+ .get_eeprom = lan78xx_ethtool_get_eeprom,
+ .set_eeprom = lan78xx_ethtool_set_eeprom,
+ .get_ethtool_stats = lan78xx_get_stats,
+ .get_sset_count = lan78xx_get_sset_count,
+ .get_strings = lan78xx_get_strings,
+ .get_wol = lan78xx_get_wol,
+ .set_wol = lan78xx_set_wol,
+ .get_eee = lan78xx_get_eee,
+ .set_eee = lan78xx_set_eee,
+};
+
+static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+{
+ u32 addr_lo, addr_hi;
+ int ret;
+ u8 addr[6];
+
+ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+
+ addr[0] = addr_lo & 0xFF;
+ addr[1] = (addr_lo >> 8) & 0xFF;
+ addr[2] = (addr_lo >> 16) & 0xFF;
+ addr[3] = (addr_lo >> 24) & 0xFF;
+ addr[4] = addr_hi & 0xFF;
+ addr[5] = (addr_hi >> 8) & 0xFF;
+
+ if (!is_valid_ether_addr(addr)) {
+ /* reading mac address from EEPROM or OTP */
+ if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+ addr) == 0) ||
+ (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+ addr) == 0)) {
+ if (is_valid_ether_addr(addr)) {
+ /* eeprom values are valid so use them */
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from EEPROM");
+ } else {
+ /* generate random MAC */
+ random_ether_addr(addr);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address set to random addr");
+ }
+ } else {
+ /* generate random MAC */
+ random_ether_addr(addr);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address set to random addr");
+ }
+
+ /* update the registers on every path so the perfect
+ * filter below never latches a stale address
+ */
+ addr_lo = addr[0] | (addr[1] << 8) |
+ (addr[2] << 16) | (addr[3] << 24);
+ addr_hi = addr[4] | (addr[5] << 8);
+
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ }
+
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
+ ether_addr_copy(dev->net->dev_addr, addr);
+}
+
+static void lan78xx_mii_init(struct lan78xx_net *dev)
+{
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = lan78xx_mdio_read;
+ dev->mii.mdio_write = lan78xx_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = INTERNAL_PHY_ID;
+ dev->mii.supports_gmii = true;
+}
+
+static int lan78xx_phy_init(struct lan78xx_net *dev)
+{
+ int temp;
+ struct mii_if_info *mii = &dev->mii;
+
+ if ((!mii->mdio_write) || (!mii->mdio_read))
+ return -EOPNOTSUPP;
+
+ temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+ temp |= ADVERTISE_ALL;
+ mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
+ temp | ADVERTISE_CSMA |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+ /* set to AUTOMDIX */
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+ dev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* MAC doesn't support 1000HD */
+ temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
+ mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
+ temp & ~ADVERTISE_1000HALF);
+
+ /* clear interrupt */
+ mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+ mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
+ PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
+ PHY_VTSE_INT_MASK_LINK_CHANGE_);
+
+ netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+
+ return 0;
+}
+
+static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
+{
+ int ret = 0;
+ u32 buf;
+ bool rxenabled;
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+
+ rxenabled = ((buf & MAC_RX_RXEN_) != 0);
+
+ if (rxenabled) {
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ }
+
+ /* add 4 to size for FCS */
+ buf &= ~MAC_RX_MAX_SIZE_MASK_;
+ buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
+
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ if (rxenabled) {
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ }
+
+ return 0;
+}
+
+static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ int count = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ struct skb_data *entry;
+ struct urb *urb;
+ int ret;
+
+ skb_queue_walk(q, skb) {
+ entry = (struct skb_data *)skb->cb;
+ if (entry->state != unlink_start)
+ goto found;
+ }
+ break;
+found:
+ entry->state = unlink_start;
+ urb = entry->urb;
+
+ /* Take a reference on the URB so it cannot be freed
+ * during usb_unlink_urb, which could otherwise trigger a
+ * use-after-free: usb_unlink_urb always races with the
+ * .complete handlers (including defer_bh).
+ */
+ usb_get_urb(urb);
+ spin_unlock_irqrestore(&q->lock, flags);
+ /* during some PM-driven resume scenarios,
+ * these (async) unlinks complete immediately
+ */
+ ret = usb_unlink_urb(urb);
+ if (ret != -EINPROGRESS && ret != 0)
+ netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
+ else
+ count++;
+ usb_put_urb(urb);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+ return count;
+}
+
+static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ int ll_mtu = new_mtu + netdev->hard_header_len;
+ int old_hard_mtu = dev->hard_mtu;
+ int old_rx_urb_size = dev->rx_urb_size;
+ int ret;
+
+ if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+ return -EINVAL;
+
+ if (new_mtu <= 0)
+ return -EINVAL;
+ /* no second zero-length packet read wanted after mtu-sized packets */
+ if ((ll_mtu % dev->maxpacket) == 0)
+ return -EDOM;
+
+ ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+
+ netdev->mtu = new_mtu;
+
+ dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+ if (dev->rx_urb_size == old_hard_mtu) {
+ dev->rx_urb_size = dev->hard_mtu;
+ if (dev->rx_urb_size > old_rx_urb_size) {
+ if (netif_running(dev->net)) {
+ unlink_urbs(dev, &dev->rxq);
+ tasklet_schedule(&dev->bh);
+ }
+ }
+ }
+
+ return 0;
+}
+
+int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ u32 addr_lo, addr_hi;
+ int ret;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
+ addr_lo = netdev->dev_addr[0] |
+ netdev->dev_addr[1] << 8 |
+ netdev->dev_addr[2] << 16 |
+ netdev->dev_addr[3] << 24;
+ addr_hi = netdev->dev_addr[4] |
+ netdev->dev_addr[5] << 8;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+
+ return 0;
+}
+
+/* Enable or disable Rx checksum offload engine */
+static int lan78xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+ if (features & NETIF_F_RXCSUM) {
+ pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
+ pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
+ } else {
+ pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
+ pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
+ }
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
+ else
+ pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
+
+ spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+ return 0;
+}
+
+static void lan78xx_deferred_vlan_write(struct work_struct *param)
+{
+ struct lan78xx_priv *pdata =
+ container_of(param, struct lan78xx_priv, set_vlan);
+ struct lan78xx_net *dev = pdata->dev;
+
+ lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
+ DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
+}
+
+static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u16 vid_bit_index;
+ u16 vid_dword_index;
+
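+ /* 4096 VIDs tracked as a 128-word bitmap:
+ * word = vid / 32, bit = vid % 32
+ */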
+ vid_dword_index = (vid >> 5) & 0x7F;
+ vid_bit_index = vid & 0x1F;
+
+ pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
+
+ /* defer register writes to a sleepable context */
+ schedule_work(&pdata->set_vlan);
+
+ return 0;
+}
+
+static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u16 vid_bit_index;
+ u16 vid_dword_index;
+
+ vid_dword_index = (vid >> 5) & 0x7F;
+ vid_bit_index = vid & 0x1F;
+
+ pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
+
+ /* defer register writes to a sleepable context */
+ schedule_work(&pdata->set_vlan);
+
+ return 0;
+}
+
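+/* LTM: USB Latency Tolerance Messaging. If LTM is enabled, a 24-byte
+ * override for the six LTM registers may be fetched from EEPROM or OTP
+ * (length and pointer stored at offset 0x3F); otherwise all six
+ * registers are cleared.
+ */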
+static void lan78xx_init_ltm(struct lan78xx_net *dev)
+{
+ int ret;
+ u32 buf;
+ u32 regs[6] = { 0 };
+
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (buf & USB_CFG1_LTM_ENABLE_) {
+ u8 temp[2];
+ /* Get values from EEPROM first */
+ if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
+ if (temp[0] == 24) {
+ ret = lan78xx_read_raw_eeprom(dev,
+ temp[1] * 2,
+ 24,
+ (u8 *)regs);
+ if (ret < 0)
+ return;
+ }
+ } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
+ if (temp[0] == 24) {
+ ret = lan78xx_read_raw_otp(dev,
+ temp[1] * 2,
+ 24,
+ (u8 *)regs);
+ if (ret < 0)
+ return;
+ }
+ }
+ }
+
+ lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+ lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+ lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+ lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+ lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+ lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+}
+
+static int lan78xx_reset(struct lan78xx_net *dev)
+{
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u32 buf;
+ int ret = 0;
+ unsigned long timeout;
+
+ ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ buf |= HW_CFG_LRST_;
+ ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+ timeout = jiffies + HZ;
+ do {
+ mdelay(1);
+ ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net,
+ "timeout on completion of LiteReset");
+ return -EIO;
+ }
+ } while (buf & HW_CFG_LRST_);
+
+ lan78xx_init_mac_address(dev);
+
+ /* Respond to the IN token with a NAK */
+ ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ buf |= USB_CFG_BIR_;
+ ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+ /* Init LTM */
+ lan78xx_init_ltm(dev);
+
+ dev->net->hard_header_len += TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ if (dev->udev->speed == USB_SPEED_SUPER) {
+ buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+ dev->rx_qlen = 4;
+ dev->tx_qlen = 4;
+ } else if (dev->udev->speed == USB_SPEED_HIGH) {
+ buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+ dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
+ dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
+ } else {
+ buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+ dev->rx_qlen = 4;
+ /* without a non-zero tx_qlen the full-speed path would
+ * stall: the queue is stopped once txq reaches tx_qlen
+ * and only rewoken while txq is shorter than tx_qlen
+ */
+ dev->tx_qlen = 4;
+
+ ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+ ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+
+ ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ buf |= HW_CFG_MEF_;
+ ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+ ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ buf |= USB_CFG_BCE_;
+ ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+ /* set FIFO sizes */
+ buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+ ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+
+ buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+ ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+
+ ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+ ret = lan78xx_write_reg(dev, FLOW, 0);
+ ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
+
+ /* Don't need rfe_ctl_lock during initialisation */
+ ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+ pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+ /* Enable or disable checksum offload engines */
+ lan78xx_set_features(dev->net, dev->net->features);
+
+ lan78xx_set_multicast(dev->net);
+
+ /* reset PHY */
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ buf |= PMT_CTL_PHY_RST_;
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+ timeout = jiffies + HZ;
+ do {
+ mdelay(1);
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net, "timeout waiting for PHY Reset");
+ return -EIO;
+ }
+ } while (buf & PMT_CTL_PHY_RST_);
+
+ lan78xx_mii_init(dev);
+
+ ret = lan78xx_phy_init(dev);
+
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+
+ buf |= MAC_CR_GMII_EN_;
+ buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+ /* if EEE is enabled in the MAC, enable it on the PHY as well */
+ if (buf & MAC_CR_EEE_EN_)
+ lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
+
+ /* enable PHY interrupts */
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ buf |= INT_ENP_PHY_INT;
+ ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
+
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf |= MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+ ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
+ buf |= FCT_TX_CTL_EN_;
+ ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
+
+ ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
+ buf |= FCT_RX_CTL_EN_;
+ ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
+
+ if (!mii_nway_restart(&dev->mii))
+ netif_dbg(dev, link, dev->net, "autoneg initiated");
+
+ return 0;
+}
+
+static int lan78xx_open(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = lan78xx_reset(dev);
+ if (ret < 0)
+ goto done;
+
+ /* for Link Check */
+ if (dev->urb_intr) {
+ ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
+ if (ret < 0) {
+ netif_err(dev, ifup, dev->net,
+ "intr submit %d\n", ret);
+ goto done;
+ }
+ }
+
+ set_bit(EVENT_DEV_OPEN, &dev->flags);
+
+ netif_start_queue(net);
+
+ dev->link_on = false;
+
+ lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+done:
+ usb_autopm_put_interface(dev->intf);
+
+out:
+ return ret;
+}
+
+static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+ DECLARE_WAITQUEUE(wait, current);
+ int temp;
+
+ /* ensure there are no more active urbs */
+ add_wait_queue(&unlink_wakeup, &wait);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ dev->wait = &unlink_wakeup;
+ temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
+
+ /* maybe wait for deletions to finish. */
+ while (!skb_queue_empty(&dev->rxq) &&
+ !skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->done)) {
+ schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
+ }
+ set_current_state(TASK_RUNNING);
+ dev->wait = NULL;
+ remove_wait_queue(&unlink_wakeup, &wait);
+}
+
+int lan78xx_stop(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ netif_stop_queue(net);
+
+ netif_info(dev, ifdown, dev->net,
+ "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors);
+
+ lan78xx_terminate_urbs(dev);
+
+ usb_kill_urb(dev->urb_intr);
+
+ skb_queue_purge(&dev->rxq_pause);
+
+ /* deferred work (task, timer, softirq) must also stop.
+ * can't flush_scheduled_work() until we drop rtnl (later),
+ * else workers could deadlock; so make workers a NOP.
+ */
+ dev->flags = 0;
+ cancel_delayed_work_sync(&dev->wq);
+ tasklet_kill(&dev->bh);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
+static int lan78xx_linearize(struct sk_buff *skb)
+{
+ return skb_linearize(skb);
+}
+
+static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
+ struct sk_buff *skb, gfp_t flags)
+{
+ u32 tx_cmd_a, tx_cmd_b;
+
+ if (skb_headroom(skb) < TX_OVERHEAD) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ if (lan78xx_linearize(skb) < 0) {
+ /* don't leak the skb when linearizing fails */
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+ tx_cmd_b = 0;
+ if (skb_is_gso(skb)) {
+ u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+ tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+ tx_cmd_a |= TX_CMD_A_LSO_;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ tx_cmd_a |= TX_CMD_A_IVTG_;
+ tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
+ }
+
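+ /* the 8-byte command header is built with two pushes: TX_CMD_B
+ * first, then TX_CMD_A, so TX_CMD_A ends up first on the wire
+ */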
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_cmd_b);
+ memcpy(skb->data, &tx_cmd_b, 4);
+
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_cmd_a);
+ memcpy(skb->data, &tx_cmd_a, 4);
+
+ return skb;
+}
+
+static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
+ struct sk_buff_head *list, enum skb_state state)
+{
+ unsigned long flags;
+ enum skb_state old_state;
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+
+ spin_lock_irqsave(&list->lock, flags);
+ old_state = entry->state;
+ entry->state = state;
+
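+ /* move the skb from its source list to dev->done; interrupts
+ * stay disabled across the handoff between the two locks
+ */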
+ __skb_unlink(skb, list);
+ spin_unlock(&list->lock);
+ spin_lock(&dev->done.lock);
+
+ __skb_queue_tail(&dev->done, skb);
+ if (skb_queue_len(&dev->done) == 1)
+ tasklet_schedule(&dev->bh);
+ spin_unlock_irqrestore(&dev->done.lock, flags);
+
+ return old_state;
+}
+
+static void tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+ struct lan78xx_net *dev = entry->dev;
+
+ if (urb->status == 0) {
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += entry->length;
+ } else {
+ dev->net->stats.tx_errors++;
+
+ switch (urb->status) {
+ case -EPIPE:
+ lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+ break;
+
+ /* software-driven interface shutdown */
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ break;
+
+ case -EPROTO:
+ case -ETIME:
+ case -EILSEQ:
+ netif_stop_queue(dev->net);
+ break;
+ default:
+ netif_dbg(dev, tx_err, dev->net,
+ "tx err %d\n", entry->urb->status);
+ break;
+ }
+ }
+
+ usb_autopm_put_interface_async(dev->intf);
+
+ defer_bh(dev, skb, &dev->txq, tx_done);
+}
+
+static void lan78xx_queue_skb(struct sk_buff_head *list,
+ struct sk_buff *newsk, enum skb_state state)
+{
+ struct skb_data *entry = (struct skb_data *)newsk->cb;
+
+ __skb_queue_tail(list, newsk);
+ entry->state = state;
+}
+
+netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct sk_buff *skb2 = NULL;
+
+ if (skb) {
+ skb_tx_timestamp(skb);
+ skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
+ }
+
+ if (skb2) {
+ skb_queue_tail(&dev->txq_pend, skb2);
+
+ if (skb_queue_len(&dev->txq_pend) > 10)
+ netif_stop_queue(net);
+ } else {
+ netif_dbg(dev, tx_err, dev->net,
+ "lan78xx_tx_prep return NULL\n");
+ dev->net->stats.tx_errors++;
+ dev->net->stats.tx_dropped++;
+ }
+
+ tasklet_schedule(&dev->bh);
+
+ return NETDEV_TX_OK;
+}
+
+int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+ int tmp;
+ struct usb_host_interface *alt = NULL;
+ struct usb_host_endpoint *in = NULL, *out = NULL;
+ struct usb_host_endpoint *status = NULL;
+
+ for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
+ unsigned ep;
+
+ in = NULL;
+ out = NULL;
+ status = NULL;
+ alt = intf->altsetting + tmp;
+
+ for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
+ struct usb_host_endpoint *e;
+ int intr = 0;
+
+ e = alt->endpoint + ep;
+ switch (e->desc.bmAttributes) {
+ case USB_ENDPOINT_XFER_INT:
+ if (!usb_endpoint_dir_in(&e->desc))
+ continue;
+ intr = 1;
+ /* FALLTHROUGH */
+ case USB_ENDPOINT_XFER_BULK:
+ break;
+ default:
+ continue;
+ }
+ if (usb_endpoint_dir_in(&e->desc)) {
+ if (!intr && !in)
+ in = e;
+ else if (intr && !status)
+ status = e;
+ } else {
+ if (!out)
+ out = e;
+ }
+ }
+ if (in && out)
+ break;
+ }
+ if (!alt || !in || !out)
+ return -EINVAL;
+
+ dev->pipe_in = usb_rcvbulkpipe(dev->udev,
+ in->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ dev->pipe_out = usb_sndbulkpipe(dev->udev,
+ out->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ dev->ep_intr = status;
+
+ return 0;
+}
+
+static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+ struct lan78xx_priv *pdata = NULL;
+ int ret;
+ int i;
+
+ ret = lan78xx_get_endpoints(dev, intf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d", ret);
+ return ret;
+ }
+
+ dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+ pdata = (struct lan78xx_priv *)(dev->data[0]);
+ if (!pdata) {
+ netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
+ return -ENOMEM;
+ }
+
+ pdata->dev = dev;
+
+ spin_lock_init(&pdata->rfe_ctl_lock);
+ mutex_init(&pdata->dataport_mutex);
+
+ INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
+
+ for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
+ pdata->vlan_table[i] = 0;
+
+ INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
+
+ dev->net->features = 0;
+
+ if (DEFAULT_TX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_HW_CSUM;
+
+ if (DEFAULT_RX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_RXCSUM;
+
+ if (DEFAULT_TSO_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+
+ dev->net->hw_features = dev->net->features;
+
+ /* Init all registers */
+ ret = lan78xx_reset(dev);
+
+ dev->net->flags |= IFF_MULTICAST;
+
+ pdata->wol = WAKE_MAGIC;
+
+ return 0;
+}
+
+static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+ if (pdata) {
+ netif_dbg(dev, ifdown, dev->net, "free pdata");
+ kfree(pdata);
+ pdata = NULL;
+ dev->data[0] = 0;
+ }
+}
+
+static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ u32 rx_cmd_a, u32 rx_cmd_b)
+{
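+ /* the device reports the raw 16-bit checksum in rx_cmd_b, so the
+ * frame can be flagged CHECKSUM_COMPLETE; fall back to
+ * CHECKSUM_NONE when offload is off or the hardware could not
+ * checksum this frame
+ */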
+ if (!(dev->net->features & NETIF_F_RXCSUM) ||
+ unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else {
+ skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+}
+
+void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+ int status;
+
+ if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
+ skb_queue_tail(&dev->rxq_pause, skb);
+ return;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev->net);
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
+
+ netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+ memset(skb->cb, 0, sizeof(struct skb_data));
+
+ if (skb_defer_rx_timestamp(skb))
+ return;
+
+ status = netif_rx(skb);
+ if (status != NET_RX_SUCCESS)
+ netif_dbg(dev, rx_err, dev->net,
+ "netif_rx status %d\n", status);
+}
+
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
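+ /* a bulk-in buffer may carry several frames back to back; each is
+ * prefixed by RX command words A, B and C, and its length (from
+ * RX command A) includes the 4-byte FCS, which is trimmed before
+ * the skb is handed up
+ */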
+ while (skb->len > 0) {
+ u32 rx_cmd_a, rx_cmd_b, align_count, size;
+ u16 rx_cmd_c;
+ struct sk_buff *skb2;
+ unsigned char *packet;
+
+ memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
+ le32_to_cpus(&rx_cmd_a);
+ skb_pull(skb, sizeof(rx_cmd_a));
+
+ memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
+ le32_to_cpus(&rx_cmd_b);
+ skb_pull(skb, sizeof(rx_cmd_b));
+
+ memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
+ le16_to_cpus(&rx_cmd_c);
+ skb_pull(skb, sizeof(rx_cmd_c));
+
+ packet = skb->data;
+
+ /* get the packet length */
+ size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+ align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+
+ if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "Error rx_cmd_a=0x%08x", rx_cmd_a);
+ } else {
+ /* last frame in this batch */
+ if (skb->len == size) {
+ lan78xx_rx_csum_offload(dev, skb,
+ rx_cmd_a, rx_cmd_b);
+
+ skb_trim(skb, skb->len - 4); /* remove fcs */
+ skb->truesize = size + sizeof(struct sk_buff);
+
+ return 1;
+ }
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2)) {
+ netdev_warn(dev->net, "Error allocating skb");
+ return 0;
+ }
+
+ skb2->len = size;
+ skb2->data = packet;
+ skb_set_tail_pointer(skb2, size);
+
+ lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+
+ skb_trim(skb2, skb2->len - 4); /* remove fcs */
+ skb2->truesize = size + sizeof(struct sk_buff);
+
+ lan78xx_skb_return(dev, skb2);
+ }
+
+ skb_pull(skb, size);
+
+ /* padding bytes before the next frame starts */
+ if (skb->len)
+ skb_pull(skb, align_count);
+ }
+
+ if (unlikely(skb->len < 0)) {
+ netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+ if (!lan78xx_rx(dev, skb)) {
+ dev->net->stats.rx_errors++;
+ goto done;
+ }
+
+ if (skb->len) {
+ lan78xx_skb_return(dev, skb);
+ return;
+ }
+
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
+ dev->net->stats.rx_errors++;
+done:
+ skb_queue_tail(&dev->done, skb);
+}
+
+static void rx_complete(struct urb *urb);
+
+static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
+{
+ struct sk_buff *skb;
+ struct skb_data *entry;
+ unsigned long lockflags;
+ size_t size = dev->rx_urb_size;
+ int ret = 0;
+
+ skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!skb) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ entry = (struct skb_data *)skb->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+ entry->length = 0;
+
+ usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+ skb->data, size, rx_complete, skb);
+
+ spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
+ if (netif_device_present(dev->net) &&
+ netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+ break;
+ case -EPIPE:
+ lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+ break;
+ case -ENODEV:
+ netif_dbg(dev, ifdown, dev->net, "device gone\n");
+ netif_device_detach(dev->net);
+ break;
+ case -EHOSTUNREACH:
+ ret = -ENOLINK;
+ break;
+ default:
+ netif_dbg(dev, rx_err, dev->net,
+ "rx submit, %d\n", ret);
+ tasklet_schedule(&dev->bh);
+ }
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+ ret = -ENOLINK;
+ }
+ spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ usb_free_urb(urb);
+ }
+ return ret;
+}
+
+static void rx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+ struct lan78xx_net *dev = entry->dev;
+ int urb_status = urb->status;
+ enum skb_state state;
+
+ skb_put(skb, urb->actual_length);
+ state = rx_done;
+ entry->urb = NULL;
+
+ switch (urb_status) {
+ case 0:
+ if (skb->len < dev->net->hard_header_len) {
+ state = rx_cleanup;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net,
+ "rx length %d\n", skb->len);
+ }
+ usb_mark_last_busy(dev->udev);
+ break;
+ case -EPIPE:
+ dev->net->stats.rx_errors++;
+ lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+ /* FALLTHROUGH */
+ case -ECONNRESET: /* async unlink */
+ case -ESHUTDOWN: /* hardware gone */
+ netif_dbg(dev, ifdown, dev->net,
+ "rx shutdown, code %d\n", urb_status);
+ state = rx_cleanup;
+ entry->urb = urb;
+ urb = NULL;
+ break;
+ case -EPROTO:
+ case -ETIME:
+ case -EILSEQ:
+ dev->net->stats.rx_errors++;
+ state = rx_cleanup;
+ entry->urb = urb;
+ urb = NULL;
+ break;
+
+ /* data overrun ... flush fifo? */
+ case -EOVERFLOW:
+ dev->net->stats.rx_over_errors++;
+ /* FALLTHROUGH */
+
+ default:
+ state = rx_cleanup;
+ dev->net->stats.rx_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+ break;
+ }
+
+ state = defer_bh(dev, skb, &dev->rxq, state);
+
+ if (urb) {
+ if (netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ state != unlink_start) {
+ rx_submit(dev, urb, GFP_ATOMIC);
+ return;
+ }
+ usb_free_urb(urb);
+ }
+ netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+}
+
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
+{
+ int length;
+ struct urb *urb = NULL;
+ struct skb_data *entry;
+ unsigned long flags;
+ struct sk_buff_head *tqp = &dev->txq_pend;
+ struct sk_buff *skb, *skb2;
+ int ret;
+ int count, pos;
+ int skb_totallen, pkt_cnt;
+
+ skb_totallen = 0;
+ pkt_cnt = 0;
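+ /* batch as many pending non-GSO skbs as fit into one bulk-out
+ * URB, padding each to a 4-byte boundary; a GSO skb is always
+ * sent on its own
+ */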
+ for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+ if (skb_is_gso(skb)) {
+ if (pkt_cnt) {
+ /* handle previous packets first */
+ break;
+ }
+ length = skb->len;
+ skb2 = skb_dequeue(tqp);
+ goto gso_skb;
+ }
+
+ if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+ break;
+ skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
+ pkt_cnt++;
+ }
+
+ /* copy to a single skb */
+ skb = alloc_skb(skb_totallen, GFP_ATOMIC);
+ if (!skb)
+ goto drop;
+
+ skb_put(skb, skb_totallen);
+
+ for (count = pos = 0; count < pkt_cnt; count++) {
+ skb2 = skb_dequeue(tqp);
+ if (skb2) {
+ memcpy(skb->data + pos, skb2->data, skb2->len);
+ pos += roundup(skb2->len, sizeof(u32));
+ dev_kfree_skb(skb2);
+ }
+ }
+
+ length = skb_totallen;
+
+gso_skb:
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netif_dbg(dev, tx_err, dev->net, "no urb\n");
+ goto drop;
+ }
+
+ entry = (struct skb_data *)skb->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+ entry->length = length;
+
+ spin_lock_irqsave(&dev->txq.lock, flags);
+ ret = usb_autopm_get_interface_async(dev->intf);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ goto drop;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
+ skb->data, skb->len, tx_complete, skb);
+
+ if (length % dev->maxpacket == 0) {
+ /* transfer length is a multiple of the endpoint size,
+ * so send a zero-length packet to mark the end
+ */
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
+#ifdef CONFIG_PM
+ /* if this triggers, the device is still asleep */
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ /* transmission will be done in resume */
+ usb_anchor_urb(urb, &dev->deferred);
+ /* no point in processing more packets */
+ netif_stop_queue(dev->net);
+ usb_put_urb(urb);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+ return;
+ }
+#endif
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ dev->net->trans_start = jiffies;
+ lan78xx_queue_skb(&dev->txq, skb, tx_start);
+ if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
+ netif_stop_queue(dev->net);
+ break;
+ case -EPIPE:
+ netif_stop_queue(dev->net);
+ lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+ usb_autopm_put_interface_async(dev->intf);
+ break;
+ default:
+ usb_autopm_put_interface_async(dev->intf);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx: submit urb err %d\n", ret);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+ if (ret) {
+ netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
+drop:
+ dev->net->stats.tx_dropped++;
+ if (skb)
+ dev_kfree_skb_any(skb);
+ usb_free_urb(urb);
+ } else {
+ netif_dbg(dev, tx_queued, dev->net,
+ "> tx, len %d, type 0x%x\n", length, skb->protocol);
+ }
+}
+
+static void lan78xx_rx_bh(struct lan78xx_net *dev)
+{
+ struct urb *urb;
+ int i;
+
+ if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
+ for (i = 0; i < 10; i++) {
+ if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
+ break;
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb)
+ if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
+ return;
+ }
+
+ if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
+ tasklet_schedule(&dev->bh);
+ }
+ if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+ netif_wake_queue(dev->net);
+}
+
+static void lan78xx_bh(unsigned long param)
+{
+ struct lan78xx_net *dev = (struct lan78xx_net *)param;
+ struct sk_buff *skb;
+ struct skb_data *entry;
+
+ while ((skb = skb_dequeue(&dev->done))) {
+ entry = (struct skb_data *)(skb->cb);
+ switch (entry->state) {
+ case rx_done:
+ entry->state = rx_cleanup;
+ rx_process(dev, skb);
+ continue;
+ case tx_done:
+ usb_free_urb(entry->urb);
+ dev_kfree_skb(skb);
+ continue;
+ case rx_cleanup:
+ usb_free_urb(entry->urb);
+ dev_kfree_skb(skb);
+ continue;
+ default:
+ netdev_dbg(dev->net, "skb state %d\n", entry->state);
+ return;
+ }
+ }
+
+ if (netif_device_present(dev->net) && netif_running(dev->net)) {
+ if (!skb_queue_empty(&dev->txq_pend))
+ lan78xx_tx_bh(dev);
+
+ if (!timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags))
+ lan78xx_rx_bh(dev);
+ }
+}
+
+static void lan78xx_delayedwork(struct work_struct *work)
+{
+ int status;
+ struct lan78xx_net *dev;
+
+ dev = container_of(work, struct lan78xx_net, wq.work);
+
+ if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->txq);
+ status = usb_autopm_get_interface(dev->intf);
+ if (status < 0)
+ goto fail_pipe;
+ status = usb_clear_halt(dev->udev, dev->pipe_out);
+ usb_autopm_put_interface(dev->intf);
+ if (status < 0 &&
+ status != -EPIPE &&
+ status != -ESHUTDOWN) {
+ if (netif_msg_tx_err(dev))
+fail_pipe:
+ netdev_err(dev->net,
+ "can't clear tx halt, status %d\n",
+ status);
+ } else {
+ clear_bit(EVENT_TX_HALT, &dev->flags);
+ if (status != -ESHUTDOWN)
+ netif_wake_queue(dev->net);
+ }
+ }
+ if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->rxq);
+ status = usb_autopm_get_interface(dev->intf);
+ if (status < 0)
+ goto fail_halt;
+ status = usb_clear_halt(dev->udev, dev->pipe_in);
+ usb_autopm_put_interface(dev->intf);
+ if (status < 0 &&
+ status != -EPIPE &&
+ status != -ESHUTDOWN) {
+ if (netif_msg_rx_err(dev))
+fail_halt:
+ netdev_err(dev->net,
+ "can't clear rx halt, status %d\n",
+ status);
+ } else {
+ clear_bit(EVENT_RX_HALT, &dev->flags);
+ tasklet_schedule(&dev->bh);
+ }
+ }
+
+ if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+ int ret = 0;
+
+ clear_bit(EVENT_LINK_RESET, &dev->flags);
+ status = usb_autopm_get_interface(dev->intf);
+ if (status < 0)
+ goto skip_reset;
+ ret = lan78xx_link_reset(dev);
+ if (ret < 0) {
+ usb_autopm_put_interface(dev->intf);
+skip_reset:
+ netdev_info(dev->net, "link reset failed (%d)\n",
+ ret);
+ } else {
+ usb_autopm_put_interface(dev->intf);
+ }
+ }
+}
+
+static void intr_complete(struct urb *urb)
+{
+ struct lan78xx_net *dev = urb->context;
+ int status = urb->status;
+
+ switch (status) {
+ /* success */
+ case 0:
+ lan78xx_status(dev, urb);
+ break;
+
+ /* software-driven interface shutdown */
+ case -ENOENT: /* urb killed */
+ case -ESHUTDOWN: /* hardware gone */
+ netif_dbg(dev, ifdown, dev->net,
+ "intr shutdown, code %d\n", status);
+ return;
+
+ /* NOTE: not throttling like RX/TX, since this endpoint
+ * already polls infrequently
+ */
+ default:
+ netdev_dbg(dev->net, "intr status %d\n", status);
+ break;
+ }
+
+ if (!netif_running(dev->net))
+ return;
+
+ memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+ "intr resubmit --> %d\n", status);
+}
+
+static void lan78xx_disconnect(struct usb_interface *intf)
+{
+ struct lan78xx_net *dev;
+ struct usb_device *udev;
+ struct net_device *net;
+
+ dev = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, NULL);
+ if (!dev)
+ return;
+
+ udev = interface_to_usbdev(intf);
+
+ net = dev->net;
+ unregister_netdev(net);
+
+ cancel_delayed_work_sync(&dev->wq);
+
+ usb_scuttle_anchored_urbs(&dev->deferred);
+
+ lan78xx_unbind(dev, intf);
+
+ usb_kill_urb(dev->urb_intr);
+ usb_free_urb(dev->urb_intr);
+
+ free_netdev(net);
+ usb_put_dev(udev);
+}
+
+void lan78xx_tx_timeout(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ unlink_urbs(dev, &dev->txq);
+ tasklet_schedule(&dev->bh);
+}
+
+static const struct net_device_ops lan78xx_netdev_ops = {
+ .ndo_open = lan78xx_open,
+ .ndo_stop = lan78xx_stop,
+ .ndo_start_xmit = lan78xx_start_xmit,
+ .ndo_tx_timeout = lan78xx_tx_timeout,
+ .ndo_change_mtu = lan78xx_change_mtu,
+ .ndo_set_mac_address = lan78xx_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = lan78xx_ioctl,
+ .ndo_set_rx_mode = lan78xx_set_multicast,
+ .ndo_set_features = lan78xx_set_features,
+ .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
+};
+
+static int lan78xx_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct lan78xx_net *dev;
+ struct net_device *netdev;
+ struct usb_device *udev;
+ int ret;
+ unsigned maxp;
+ unsigned period;
+ u8 *buf = NULL;
+
+ udev = interface_to_usbdev(intf);
+ udev = usb_get_dev(udev);
+
+ ret = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct lan78xx_net));
+ if (!netdev) {
+ dev_err(&intf->dev, "Error: OOM\n");
+ goto out1;
+ }
+
+ /* netdev_printk() needs this */
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ dev = netdev_priv(netdev);
+ dev->udev = udev;
+ dev->intf = intf;
+ dev->net = netdev;
+ dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
+ | NETIF_MSG_PROBE | NETIF_MSG_LINK);
+
+ skb_queue_head_init(&dev->rxq);
+ skb_queue_head_init(&dev->txq);
+ skb_queue_head_init(&dev->done);
+ skb_queue_head_init(&dev->rxq_pause);
+ skb_queue_head_init(&dev->txq_pend);
+ mutex_init(&dev->phy_mutex);
+
+ tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
+ INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
+ init_usb_anchor(&dev->deferred);
+
+ netdev->netdev_ops = &lan78xx_netdev_ops;
+ netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
+ netdev->ethtool_ops = &lan78xx_ethtool_ops;
+
+ ret = lan78xx_bind(dev, intf);
+ if (ret < 0)
+ goto out2;
+ strcpy(netdev->name, "eth%d");
+
+ if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
+ netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+
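+ /* this assumes the interface exposes its bulk-in, bulk-out and
+ * interrupt endpoints in exactly this order
+ */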
+ dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+ dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+ dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+
+ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+
+ dev->pipe_intr = usb_rcvintpipe(dev->udev,
+ dev->ep_intr->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ period = dev->ep_intr->desc.bInterval;
+
+ maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+ buf = kmalloc(maxp, GFP_KERNEL);
+ if (buf) {
+ dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb_intr) {
+ kfree(buf);
+ goto out3;
+ } else {
+ usb_fill_int_urb(dev->urb_intr, dev->udev,
+ dev->pipe_intr, buf, maxp,
+ intr_complete, dev, period);
+ }
+ }
+
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+
+ /* driver requires remote-wakeup capability during autosuspend. */
+ intf->needs_remote_wakeup = 1;
+
+ ret = register_netdev(netdev);
+ if (ret != 0) {
+ netif_err(dev, probe, netdev, "couldn't register the device\n");
+ goto out2;
+ }
+
+ usb_set_intfdata(intf, dev);
+
+ ret = device_set_wakeup_enable(&udev->dev, true);
+
+ /* The 2 second default autosuspend delay costs more than it
+ * saves; raise it to 10 seconds.
+ */
+ pm_runtime_set_autosuspend_delay(&udev->dev,
+ DEFAULT_AUTOSUSPEND_DELAY);
+
+ return 0;
+
+out3:
+ lan78xx_unbind(dev, intf);
+out2:
+ free_netdev(netdev);
+out1:
+ usb_put_dev(udev);
+
+ return ret;
+}
+
+static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
+{
+ const u16 crc16poly = 0x8005;
+ int i;
+ u16 bit, crc, msb;
+ u8 data;
+
+ crc = 0xFFFF;
+ for (i = 0; i < len; i++) {
+ data = *buf++;
+ for (bit = 0; bit < 8; bit++) {
+ msb = crc >> 15;
+ crc <<= 1;
+
+ if (msb ^ (u16)(data & 1)) {
+ crc ^= crc16poly;
+ crc |= (u16)0x0001U;
+ }
+ data >>= 1;
+ }
+ }
+
+ return crc;
+}
+
+static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+{
+ u32 buf;
+ int ret;
+ int mask_index;
+ u16 crc;
+ u32 temp_wucsr;
+ u32 temp_pmt_ctl;
+ const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+ const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+ const u8 arp_type[2] = { 0x08, 0x06 };
+
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf &= ~MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+ temp_wucsr = 0;
+
+ temp_pmt_ctl = 0;
+ ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+ temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
+ temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
+
+ for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+
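+ /* each wakeup-frame filter slot (WUF_CFG plus WUF_MASK0..3)
+ * matches a frame by CRC-16 over the header bytes selected by
+ * its 128-bit byte mask
+ */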
+ mask_index = 0;
+ if (wol & WAKE_PHY) {
+ temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_MAGIC) {
+ temp_wucsr |= WUCSR_MPEN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
+ }
+ if (wol & WAKE_BCAST) {
+ temp_wucsr |= WUCSR_BCST_EN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_MCAST) {
+ temp_wucsr |= WUCSR_WAKE_EN_;
+
+ /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
+ crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ WUF_CFGX_EN_ |
+ WUF_CFGX_TYPE_MCAST_ |
+ (0 << WUF_CFGX_OFFSET_SHIFT_) |
+ (crc & WUF_CFGX_CRC16_MASK_));
+
+ ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+ ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ /* for IPv6 Multicast */
+ crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ WUF_CFGX_EN_ |
+ WUF_CFGX_TYPE_MCAST_ |
+ (0 << WUF_CFGX_OFFSET_SHIFT_) |
+ (crc & WUF_CFGX_CRC16_MASK_));
+
+ ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+ ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_UCAST) {
+ temp_wucsr |= WUCSR_PFDA_EN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_ARP) {
+ temp_wucsr |= WUCSR_WAKE_EN_;
+
+ /* set WUF_CFG & WUF_MASK
+ * for packettype (offset 12,13) = ARP (0x0806)
+ */
+ crc = lan78xx_wakeframe_crc16(arp_type, 2);
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ WUF_CFGX_EN_ |
+ WUF_CFGX_TYPE_ALL_ |
+ (0 << WUF_CFGX_OFFSET_SHIFT_) |
+ (crc & WUF_CFGX_CRC16_MASK_));
+
+ ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+ ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+
+ ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+
+ /* when multiple WOL bits are set */
+ if (hweight_long((unsigned long)wol) > 1) {
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+
+ /* clear WUPS */
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ buf |= PMT_CTL_WUPS_MASK_;
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ return 0;
+}
+
+int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct lan78xx_net *dev = usb_get_intfdata(intf);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u32 buf;
+ int ret;
+ int event;
+
+ ret = 0;
+ event = message.event;
+
+ if (!dev->suspend_count++) {
+ spin_lock_irq(&dev->txq.lock);
+ /* don't autosuspend while transmitting */
+ if ((skb_queue_len(&dev->txq) ||
+ skb_queue_len(&dev->txq_pend)) &&
+ PMSG_IS_AUTO(message)) {
+ spin_unlock_irq(&dev->txq.lock);
+ ret = -EBUSY;
+ goto out;
+ } else {
+ set_bit(EVENT_DEV_ASLEEP, &dev->flags);
+ spin_unlock_irq(&dev->txq.lock);
+ }
+
+ /* stop TX & RX */
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf &= ~MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ /* empty out the rx and tx queues */
+ netif_device_detach(dev->net);
+ lan78xx_terminate_urbs(dev);
+ usb_kill_urb(dev->urb_intr);
+
+ /* reattach */
+ netif_device_attach(dev->net);
+ }
+
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ if (PMSG_IS_AUTO(message)) {
+ /* auto suspend (selective suspend) */
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf &= ~MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+ /* set goodframe wakeup */
+ ret = lan78xx_read_reg(dev, WUCSR, &buf);
+
+ buf |= WUCSR_RFE_WAKE_EN_;
+ buf |= WUCSR_STORE_WAKE_;
+
+ ret = lan78xx_write_reg(dev, WUCSR, buf);
+
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+ buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+ buf |= PMT_CTL_RES_CLR_WKP_STS_;
+
+ buf |= PMT_CTL_PHY_WAKE_EN_;
+ buf |= PMT_CTL_WOL_EN_;
+ buf &= ~PMT_CTL_SUS_MODE_MASK_;
+ buf |= PMT_CTL_SUS_MODE_3_;
+
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+ buf |= PMT_CTL_WUPS_MASK_;
+
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ } else {
+ lan78xx_set_suspend(dev, pdata->wol);
+ }
+ }
+
+out:
+ return ret;
+}
+
+int lan78xx_resume(struct usb_interface *intf)
+{
+ struct lan78xx_net *dev = usb_get_intfdata(intf);
+ struct sk_buff *skb;
+ struct urb *res;
+ int ret;
+ u32 buf;
+
+ if (!--dev->suspend_count) {
+ /* resume interrupt URBs */
+ if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
+ usb_submit_urb(dev->urb_intr, GFP_NOIO);
+
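+ /* resubmit transmissions that were deferred while asleep */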
+ spin_lock_irq(&dev->txq.lock);
+ while ((res = usb_get_from_anchor(&dev->deferred))) {
+ skb = (struct sk_buff *)res->context;
+ ret = usb_submit_urb(res, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_kfree_skb_any(skb);
+ usb_free_urb(res);
+ usb_autopm_put_interface_async(dev->intf);
+ } else {
+ dev->net->trans_start = jiffies;
+ lan78xx_queue_skb(&dev->txq, skb, tx_start);
+ }
+ }
+
+ clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+ spin_unlock_irq(&dev->txq.lock);
+
+ if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
+ netif_start_queue(dev->net);
+ tasklet_schedule(&dev->bh);
+ }
+ }
+
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+ ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
+ WUCSR2_ARP_RCD_ |
+ WUCSR2_IPV6_TCPSYN_RCD_ |
+ WUCSR2_IPV4_TCPSYN_RCD_);
+
+ ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
+ WUCSR_EEE_RX_WAKE_ |
+ WUCSR_PFDA_FR_ |
+ WUCSR_RFE_WAKE_FR_ |
+ WUCSR_WUFR_ |
+ WUCSR_MPR_ |
+ WUCSR_BCST_FR_);
+
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf |= MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+ return 0;
+}
+
+int lan78xx_reset_resume(struct usb_interface *intf)
+{
+ struct lan78xx_net *dev = usb_get_intfdata(intf);
+
+ lan78xx_reset(dev);
+ return lan78xx_resume(intf);
+}
+
+static const struct usb_device_id products[] = {
+ {
+ /* LAN7800 USB Gigabit Ethernet Device */
+ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
+ },
+ {
+ /* LAN7850 USB Gigabit Ethernet Device */
+ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver lan78xx_driver = {
+ .name = DRIVER_NAME,
+ .id_table = products,
+ .probe = lan78xx_probe,
+ .disconnect = lan78xx_disconnect,
+ .suspend = lan78xx_suspend,
+ .resume = lan78xx_resume,
+ .reset_resume = lan78xx_reset_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(lan78xx_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
new file mode 100644
index 000000000000..ae7562ee72ad
--- /dev/null
+++ b/drivers/net/usb/lan78xx.h
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _LAN78XX_H
+#define _LAN78XX_H
+
+/* USB Vendor Requests */
+#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1
+#define USB_VENDOR_REQUEST_GET_STATS 0xA2
+
+/* Interrupt Endpoint status word bitfields */
+#define INT_ENP_EEE_START_TX_LPI_INT BIT(26)
+#define INT_ENP_EEE_STOP_TX_LPI_INT BIT(25)
+#define INT_ENP_EEE_RX_LPI_INT BIT(24)
+#define INT_ENP_RDFO_INT BIT(22)
+#define INT_ENP_TXE_INT BIT(21)
+#define INT_ENP_TX_DIS_INT BIT(19)
+#define INT_ENP_RX_DIS_INT BIT(18)
+#define INT_ENP_PHY_INT BIT(17)
+#define INT_ENP_DP_INT BIT(16)
+#define INT_ENP_MAC_ERR_INT BIT(15)
+#define INT_ENP_TDFU_INT BIT(14)
+#define INT_ENP_TDFO_INT BIT(13)
+#define INT_ENP_UTX_FP_INT BIT(12)
+
+#define TX_PKT_ALIGNMENT 4
+#define RX_PKT_ALIGNMENT 4
+
+/* Tx Command A */
+#define TX_CMD_A_IGE_ (0x20000000)
+#define TX_CMD_A_ICE_ (0x10000000)
+#define TX_CMD_A_LSO_ (0x08000000)
+#define TX_CMD_A_IPE_ (0x04000000)
+#define TX_CMD_A_TPE_ (0x02000000)
+#define TX_CMD_A_IVTG_ (0x01000000)
+#define TX_CMD_A_RVTG_ (0x00800000)
+#define TX_CMD_A_FCS_ (0x00400000)
+#define TX_CMD_A_LEN_MASK_ (0x000FFFFF)
+
+/* Tx Command B */
+#define TX_CMD_B_MSS_SHIFT_ (16)
+#define TX_CMD_B_MSS_MASK_ (0x3FFF0000)
+#define TX_CMD_B_MSS_MIN_ ((unsigned short)8)
+#define TX_CMD_B_VTAG_MASK_ (0x0000FFFF)
+#define TX_CMD_B_VTAG_PRI_MASK_ (0x0000E000)
+#define TX_CMD_B_VTAG_CFI_MASK_ (0x00001000)
+#define TX_CMD_B_VTAG_VID_MASK_ (0x00000FFF)
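+/* Illustrative sketch (not from the original source) of how the two
+ * per-packet TX command words are composed:
+ *   tx_cmd_a = (len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
+ *   tx_cmd_b = ((u32)mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+ * with TX_CMD_A_LSO_ added when large-send offload applies; that FCS_
+ * requests checksum insertion is an assumption here.
+ */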
+
+/* Rx Command A */
+#define RX_CMD_A_ICE_ (0x80000000)
+#define RX_CMD_A_TCE_ (0x40000000)
+#define RX_CMD_A_CSE_MASK_ (0xC0000000)
+#define RX_CMD_A_IPV_ (0x20000000)
+#define RX_CMD_A_PID_MASK_ (0x18000000)
+#define RX_CMD_A_PID_NONE_IP_ (0x00000000)
+#define RX_CMD_A_PID_TCP_IP_ (0x08000000)
+#define RX_CMD_A_PID_UDP_IP_ (0x10000000)
+#define RX_CMD_A_PID_IP_ (0x18000000)
+#define RX_CMD_A_PFF_ (0x04000000)
+#define RX_CMD_A_BAM_ (0x02000000)
+#define RX_CMD_A_MAM_ (0x01000000)
+#define RX_CMD_A_FVTG_ (0x00800000)
+#define RX_CMD_A_RED_ (0x00400000)
+#define RX_CMD_A_RX_ERRS_MASK_ (0xC03F0000)
+#define RX_CMD_A_RWT_ (0x00200000)
+#define RX_CMD_A_RUNT_ (0x00100000)
+#define RX_CMD_A_LONG_ (0x00080000)
+#define RX_CMD_A_RXE_ (0x00040000)
+#define RX_CMD_A_DRB_ (0x00020000)
+#define RX_CMD_A_FCS_ (0x00010000)
+#define RX_CMD_A_UAM_ (0x00008000)
+#define RX_CMD_A_ICSM_ (0x00004000)
+#define RX_CMD_A_LEN_MASK_ (0x00003FFF)
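+/* Illustrative sketch: the frame length and an error summary come out
+ * of the first RX command word, e.g.
+ *   len = rx_cmd_a & RX_CMD_A_LEN_MASK_;
+ *   bad = rx_cmd_a & RX_CMD_A_RX_ERRS_MASK_;
+ */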
+
+/* Rx Command B */
+#define RX_CMD_B_CSUM_SHIFT_ (16)
+#define RX_CMD_B_CSUM_MASK_ (0xFFFF0000)
+#define RX_CMD_B_VTAG_MASK_ (0x0000FFFF)
+#define RX_CMD_B_VTAG_PRI_MASK_ (0x0000E000)
+#define RX_CMD_B_VTAG_CFI_MASK_ (0x00001000)
+#define RX_CMD_B_VTAG_VID_MASK_ (0x00000FFF)
+
+/* Rx Command C */
+#define RX_CMD_C_WAKE_SHIFT_ (15)
+#define RX_CMD_C_WAKE_ (0x8000)
+#define RX_CMD_C_REF_FAIL_SHIFT_ (14)
+#define RX_CMD_C_REF_FAIL_ (0x4000)
+
+/* SCSRs */
+#define NUMBER_OF_REGS (193)
+
+#define ID_REV (0x00)
+#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
+#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
+#define ID_REV_CHIP_ID_7800_ (0x7800)
+
+#define FPGA_REV (0x04)
+#define FPGA_REV_MINOR_MASK_ (0x0000FF00)
+#define FPGA_REV_MAJOR_MASK_ (0x000000FF)
+
+#define INT_STS (0x0C)
+#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF)
+#define INT_STS_EEE_TX_LPI_STRT_ (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_ (0x02000000)
+#define INT_STS_EEE_RX_LPI_ (0x01000000)
+#define INT_STS_RDFO_ (0x00400000)
+#define INT_STS_TXE_ (0x00200000)
+#define INT_STS_TX_DIS_ (0x00080000)
+#define INT_STS_RX_DIS_ (0x00040000)
+#define INT_STS_PHY_INT_ (0x00020000)
+#define INT_STS_DP_INT_ (0x00010000)
+#define INT_STS_MAC_ERR_ (0x00008000)
+#define INT_STS_TDFU_ (0x00004000)
+#define INT_STS_TDFO_ (0x00002000)
+#define INT_STS_UFX_FP_ (0x00001000)
+#define INT_STS_GPIO_MASK_ (0x00000FFF)
+#define INT_STS_GPIO11_ (0x00000800)
+#define INT_STS_GPIO10_ (0x00000400)
+#define INT_STS_GPIO9_ (0x00000200)
+#define INT_STS_GPIO8_ (0x00000100)
+#define INT_STS_GPIO7_ (0x00000080)
+#define INT_STS_GPIO6_ (0x00000040)
+#define INT_STS_GPIO5_ (0x00000020)
+#define INT_STS_GPIO4_ (0x00000010)
+#define INT_STS_GPIO3_ (0x00000008)
+#define INT_STS_GPIO2_ (0x00000004)
+#define INT_STS_GPIO1_ (0x00000002)
+#define INT_STS_GPIO0_ (0x00000001)
+
+#define HW_CFG (0x010)
+#define HW_CFG_CLK125_EN_ (0x02000000)
+#define HW_CFG_REFCLK25_EN_ (0x01000000)
+#define HW_CFG_LED3_EN_ (0x00800000)
+#define HW_CFG_LED2_EN_ (0x00400000)
+#define HW_CFG_LED1_EN_ (0x00200000)
+#define HW_CFG_LED0_EN_ (0x00100000)
+#define HW_CFG_EEE_PHY_LUSU_ (0x00020000)
+#define HW_CFG_EEE_TSU_ (0x00010000)
+#define HW_CFG_NETDET_STS_ (0x00008000)
+#define HW_CFG_NETDET_EN_ (0x00004000)
+#define HW_CFG_EEM_ (0x00002000)
+#define HW_CFG_RST_PROTECT_ (0x00001000)
+#define HW_CFG_CONNECT_BUF_ (0x00000400)
+#define HW_CFG_CONNECT_EN_ (0x00000200)
+#define HW_CFG_CONNECT_POL_ (0x00000100)
+#define HW_CFG_SUSPEND_N_SEL_MASK_ (0x000000C0)
+#define HW_CFG_SUSPEND_N_SEL_2 (0x00000000)
+#define HW_CFG_SUSPEND_N_SEL_12N (0x00000040)
+#define HW_CFG_SUSPEND_N_SEL_012N (0x00000080)
+#define HW_CFG_SUSPEND_N_SEL_0123N (0x000000C0)
+#define HW_CFG_SUSPEND_N_POL_ (0x00000020)
+#define HW_CFG_MEF_ (0x00000010)
+#define HW_CFG_ETC_ (0x00000008)
+#define HW_CFG_LRST_ (0x00000002)
+#define HW_CFG_SRST_ (0x00000001)
+
+#define PMT_CTL (0x014)
+#define PMT_CTL_EEE_WAKEUP_EN_ (0x00002000)
+#define PMT_CTL_EEE_WUPS_ (0x00001000)
+#define PMT_CTL_MAC_SRST_ (0x00000800)
+#define PMT_CTL_PHY_PWRUP_ (0x00000400)
+#define PMT_CTL_RES_CLR_WKP_MASK_ (0x00000300)
+#define PMT_CTL_RES_CLR_WKP_STS_ (0x00000200)
+#define PMT_CTL_RES_CLR_WKP_EN_ (0x00000100)
+#define PMT_CTL_READY_ (0x00000080)
+#define PMT_CTL_SUS_MODE_MASK_ (0x00000060)
+#define PMT_CTL_SUS_MODE_0_ (0x00000000)
+#define PMT_CTL_SUS_MODE_1_ (0x00000020)
+#define PMT_CTL_SUS_MODE_2_ (0x00000040)
+#define PMT_CTL_SUS_MODE_3_ (0x00000060)
+#define PMT_CTL_PHY_RST_ (0x00000010)
+#define PMT_CTL_WOL_EN_ (0x00000008)
+#define PMT_CTL_PHY_WAKE_EN_ (0x00000004)
+#define PMT_CTL_WUPS_MASK_ (0x00000003)
+#define PMT_CTL_WUPS_MLT_ (0x00000003)
+#define PMT_CTL_WUPS_MAC_ (0x00000002)
+#define PMT_CTL_WUPS_PHY_ (0x00000001)
+
+#define GPIO_CFG0 (0x018)
+#define GPIO_CFG0_GPIOEN_MASK_ (0x0000F000)
+#define GPIO_CFG0_GPIOEN3_ (0x00008000)
+#define GPIO_CFG0_GPIOEN2_ (0x00004000)
+#define GPIO_CFG0_GPIOEN1_ (0x00002000)
+#define GPIO_CFG0_GPIOEN0_ (0x00001000)
+#define GPIO_CFG0_GPIOBUF_MASK_ (0x00000F00)
+#define GPIO_CFG0_GPIOBUF3_ (0x00000800)
+#define GPIO_CFG0_GPIOBUF2_ (0x00000400)
+#define GPIO_CFG0_GPIOBUF1_ (0x00000200)
+#define GPIO_CFG0_GPIOBUF0_ (0x00000100)
+#define GPIO_CFG0_GPIODIR_MASK_ (0x000000F0)
+#define GPIO_CFG0_GPIODIR3_ (0x00000080)
+#define GPIO_CFG0_GPIODIR2_ (0x00000040)
+#define GPIO_CFG0_GPIODIR1_ (0x00000020)
+#define GPIO_CFG0_GPIODIR0_ (0x00000010)
+#define GPIO_CFG0_GPIOD_MASK_ (0x0000000F)
+#define GPIO_CFG0_GPIOD3_ (0x00000008)
+#define GPIO_CFG0_GPIOD2_ (0x00000004)
+#define GPIO_CFG0_GPIOD1_ (0x00000002)
+#define GPIO_CFG0_GPIOD0_ (0x00000001)
+
+#define GPIO_CFG1 (0x01C)
+#define GPIO_CFG1_GPIOEN_MASK_ (0xFF000000)
+#define GPIO_CFG1_GPIOEN11_ (0x80000000)
+#define GPIO_CFG1_GPIOEN10_ (0x40000000)
+#define GPIO_CFG1_GPIOEN9_ (0x20000000)
+#define GPIO_CFG1_GPIOEN8_ (0x10000000)
+#define GPIO_CFG1_GPIOEN7_ (0x08000000)
+#define GPIO_CFG1_GPIOEN6_ (0x04000000)
+#define GPIO_CFG1_GPIOEN5_ (0x02000000)
+#define GPIO_CFG1_GPIOEN4_ (0x01000000)
+#define GPIO_CFG1_GPIOBUF_MASK_ (0x00FF0000)
+#define GPIO_CFG1_GPIOBUF11_ (0x00800000)
+#define GPIO_CFG1_GPIOBUF10_ (0x00400000)
+#define GPIO_CFG1_GPIOBUF9_ (0x00200000)
+#define GPIO_CFG1_GPIOBUF8_ (0x00100000)
+#define GPIO_CFG1_GPIOBUF7_ (0x00080000)
+#define GPIO_CFG1_GPIOBUF6_ (0x00040000)
+#define GPIO_CFG1_GPIOBUF5_ (0x00020000)
+#define GPIO_CFG1_GPIOBUF4_ (0x00010000)
+#define GPIO_CFG1_GPIODIR_MASK_ (0x0000FF00)
+#define GPIO_CFG1_GPIODIR11_ (0x00008000)
+#define GPIO_CFG1_GPIODIR10_ (0x00004000)
+#define GPIO_CFG1_GPIODIR9_ (0x00002000)
+#define GPIO_CFG1_GPIODIR8_ (0x00001000)
+#define GPIO_CFG1_GPIODIR7_ (0x00000800)
+#define GPIO_CFG1_GPIODIR6_ (0x00000400)
+#define GPIO_CFG1_GPIODIR5_ (0x00000200)
+#define GPIO_CFG1_GPIODIR4_ (0x00000100)
+#define GPIO_CFG1_GPIOD_MASK_ (0x000000FF)
+#define GPIO_CFG1_GPIOD11_ (0x00000080)
+#define GPIO_CFG1_GPIOD10_ (0x00000040)
+#define GPIO_CFG1_GPIOD9_ (0x00000020)
+#define GPIO_CFG1_GPIOD8_ (0x00000010)
+#define GPIO_CFG1_GPIOD7_ (0x00000008)
+#define GPIO_CFG1_GPIOD6_ (0x00000004)
+#define GPIO_CFG1_GPIOD5_ (0x00000002)
+#define GPIO_CFG1_GPIOD4_ (0x00000001)
+
+#define GPIO_WAKE (0x020)
+#define GPIO_WAKE_GPIOPOL_MASK_ (0x0FFF0000)
+#define GPIO_WAKE_GPIOPOL11_ (0x08000000)
+#define GPIO_WAKE_GPIOPOL10_ (0x04000000)
+#define GPIO_WAKE_GPIOPOL9_ (0x02000000)
+#define GPIO_WAKE_GPIOPOL8_ (0x01000000)
+#define GPIO_WAKE_GPIOPOL7_ (0x00800000)
+#define GPIO_WAKE_GPIOPOL6_ (0x00400000)
+#define GPIO_WAKE_GPIOPOL5_ (0x00200000)
+#define GPIO_WAKE_GPIOPOL4_ (0x00100000)
+#define GPIO_WAKE_GPIOPOL3_ (0x00080000)
+#define GPIO_WAKE_GPIOPOL2_ (0x00040000)
+#define GPIO_WAKE_GPIOPOL1_ (0x00020000)
+#define GPIO_WAKE_GPIOPOL0_ (0x00010000)
+#define GPIO_WAKE_GPIOWK_MASK_ (0x00000FFF)
+#define GPIO_WAKE_GPIOWK11_ (0x00000800)
+#define GPIO_WAKE_GPIOWK10_ (0x00000400)
+#define GPIO_WAKE_GPIOWK9_ (0x00000200)
+#define GPIO_WAKE_GPIOWK8_ (0x00000100)
+#define GPIO_WAKE_GPIOWK7_ (0x00000080)
+#define GPIO_WAKE_GPIOWK6_ (0x00000040)
+#define GPIO_WAKE_GPIOWK5_ (0x00000020)
+#define GPIO_WAKE_GPIOWK4_ (0x00000010)
+#define GPIO_WAKE_GPIOWK3_ (0x00000008)
+#define GPIO_WAKE_GPIOWK2_ (0x00000004)
+#define GPIO_WAKE_GPIOWK1_ (0x00000002)
+#define GPIO_WAKE_GPIOWK0_ (0x00000001)
+
+#define DP_SEL (0x024)
+#define DP_SEL_DPRDY_ (0x80000000)
+#define DP_SEL_RSEL_MASK_ (0x0000000F)
+#define DP_SEL_RSEL_USB_PHY_CSRS_ (0x0000000F)
+#define DP_SEL_RSEL_OTP_64BIT_ (0x00000009)
+#define DP_SEL_RSEL_OTP_8BIT_ (0x00000008)
+#define DP_SEL_RSEL_UTX_BUF_RAM_ (0x00000007)
+#define DP_SEL_RSEL_DESC_RAM_ (0x00000005)
+#define DP_SEL_RSEL_TXFIFO_ (0x00000004)
+#define DP_SEL_RSEL_RXFIFO_ (0x00000003)
+#define DP_SEL_RSEL_LSO_ (0x00000002)
+#define DP_SEL_RSEL_VLAN_DA_ (0x00000001)
+#define DP_SEL_RSEL_URXBUF_ (0x00000000)
+#define DP_SEL_VHF_HASH_LEN (16)
+#define DP_SEL_VHF_VLAN_LEN (128)
+
+#define DP_CMD (0x028)
+#define DP_CMD_WRITE_ (0x00000001)
+#define DP_CMD_READ_ (0x00000000)
+
+#define DP_ADDR (0x02C)
+#define DP_ADDR_MASK_ (0x00003FFF)
+
+#define DP_DATA (0x030)
+
+#define E2P_CMD (0x040)
+#define E2P_CMD_EPC_BUSY_ (0x80000000)
+#define E2P_CMD_EPC_CMD_MASK_ (0x70000000)
+#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000)
+#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000)
+#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000)
+#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000)
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
+#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000)
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_ (0x00000400)
+#define E2P_CMD_EPC_DL_ (0x00000200)
+#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF)
+
+#define E2P_DATA (0x044)
+#define E2P_DATA_EEPROM_DATA_MASK_ (0x000000FF)
+
+#define BOS_ATTR (0x050)
+#define BOS_ATTR_BLOCK_SIZE_MASK_ (0x000000FF)
+
+#define SS_ATTR (0x054)
+#define SS_ATTR_POLL_INT_MASK_ (0x00FF0000)
+#define SS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00)
+#define SS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF)
+
+#define HS_ATTR (0x058)
+#define HS_ATTR_POLL_INT_MASK_ (0x00FF0000)
+#define HS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00)
+#define HS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF)
+
+#define FS_ATTR (0x05C)
+#define FS_ATTR_POLL_INT_MASK_ (0x00FF0000)
+#define FS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00)
+#define FS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF)
+
+#define STR_ATTR0 (0x060)
+#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_ (0xFF000000)
+#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_ (0x00FF0000)
+#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_ (0x0000FF00)
+#define STR_ATTR0_MANUF_DESC_SIZE_MASK_ (0x000000FF)
+
+#define STR_ATTR1 (0x064)
+#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_ (0x000000FF)
+
+#define STR_FLAG_ATTR (0x068)
+#define STR_FLAG_ATTR_PME_FLAGS_MASK_ (0x000000FF)
+
+#define USB_CFG0 (0x080)
+#define USB_CFG_LPM_RESPONSE_ (0x80000000)
+#define USB_CFG_LPM_CAPABILITY_ (0x40000000)
+#define USB_CFG_LPM_ENBL_SLPM_ (0x20000000)
+#define USB_CFG_HIRD_THR_MASK_ (0x1F000000)
+#define USB_CFG_HIRD_THR_960_ (0x1C000000)
+#define USB_CFG_HIRD_THR_885_ (0x1B000000)
+#define USB_CFG_HIRD_THR_810_ (0x1A000000)
+#define USB_CFG_HIRD_THR_735_ (0x19000000)
+#define USB_CFG_HIRD_THR_660_ (0x18000000)
+#define USB_CFG_HIRD_THR_585_ (0x17000000)
+#define USB_CFG_HIRD_THR_510_ (0x16000000)
+#define USB_CFG_HIRD_THR_435_ (0x15000000)
+#define USB_CFG_HIRD_THR_360_ (0x14000000)
+#define USB_CFG_HIRD_THR_285_ (0x13000000)
+#define USB_CFG_HIRD_THR_210_ (0x12000000)
+#define USB_CFG_HIRD_THR_135_ (0x11000000)
+#define USB_CFG_HIRD_THR_60_ (0x10000000)
+#define USB_CFG_MAX_BURST_BI_MASK_ (0x00F00000)
+#define USB_CFG_MAX_BURST_BO_MASK_ (0x000F0000)
+#define USB_CFG_MAX_DEV_SPEED_MASK_ (0x0000E000)
+#define USB_CFG_MAX_DEV_SPEED_SS_ (0x00008000)
+#define USB_CFG_MAX_DEV_SPEED_HS_ (0x00000000)
+#define USB_CFG_MAX_DEV_SPEED_FS_ (0x00002000)
+#define USB_CFG_PHY_BOOST_MASK_ (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_12_ (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_8_ (0x00000100)
+#define USB_CFG_PHY_BOOST_PLUS_4_ (0x00000080)
+#define USB_CFG_PHY_BOOST_NORMAL_ (0x00000000)
+#define USB_CFG_BIR_ (0x00000040)
+#define USB_CFG_BCE_ (0x00000020)
+#define USB_CFG_PORT_SWAP_ (0x00000010)
+#define USB_CFG_LPM_EN_ (0x00000008)
+#define USB_CFG_RMT_WKP_ (0x00000004)
+#define USB_CFG_PWR_SEL_ (0x00000002)
+#define USB_CFG_STALL_BO_DIS_ (0x00000001)
+
+#define USB_CFG1 (0x084)
+#define USB_CFG1_U1_TIMEOUT_MASK_ (0xFF000000)
+#define USB_CFG1_U2_TIMEOUT_MASK_ (0x00FF0000)
+#define USB_CFG1_HS_TOUT_CAL_MASK_ (0x0000E000)
+#define USB_CFG1_DEV_U2_INIT_EN_ (0x00001000)
+#define USB_CFG1_DEV_U2_EN_ (0x00000800)
+#define USB_CFG1_DEV_U1_INIT_EN_ (0x00000400)
+#define USB_CFG1_DEV_U1_EN_ (0x00000200)
+#define USB_CFG1_LTM_ENABLE_ (0x00000100)
+#define USB_CFG1_FS_TOUT_CAL_MASK_ (0x00000070)
+#define USB_CFG1_SCALE_DOWN_MASK_ (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE3_ (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE2_ (0x00000002)
+#define USB_CFG1_SCALE_DOWN_MODE1_ (0x00000001)
+#define USB_CFG1_SCALE_DOWN_MODE0_ (0x00000000)
+
+#define USB_CFG2 (0x088)
+#define USB_CFG2_SS_DETACH_TIME_MASK_ (0xFFFF0000)
+#define USB_CFG2_HS_DETACH_TIME_MASK_ (0x0000FFFF)
+
+#define BURST_CAP (0x090)
+#define BURST_CAP_SIZE_MASK_ (0x000000FF)
+
+#define BULK_IN_DLY (0x094)
+#define BULK_IN_DLY_MASK_ (0x0000FFFF)
+
+#define INT_EP_CTL (0x098)
+#define INT_EP_INTEP_ON_ (0x80000000)
+#define INT_STS_EEE_TX_LPI_STRT_EN_ (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_EN_ (0x02000000)
+#define INT_STS_EEE_RX_LPI_EN_ (0x01000000)
+#define INT_EP_RDFO_EN_ (0x00400000)
+#define INT_EP_TXE_EN_ (0x00200000)
+#define INT_EP_TX_DIS_EN_ (0x00080000)
+#define INT_EP_RX_DIS_EN_ (0x00040000)
+#define INT_EP_PHY_INT_EN_ (0x00020000)
+#define INT_EP_DP_INT_EN_ (0x00010000)
+#define INT_EP_MAC_ERR_EN_ (0x00008000)
+#define INT_EP_TDFU_EN_ (0x00004000)
+#define INT_EP_TDFO_EN_ (0x00002000)
+#define INT_EP_UTX_FP_EN_ (0x00001000)
+#define INT_EP_GPIO_EN_MASK_ (0x00000FFF)
+
+#define PIPE_CTL (0x09C)
+#define PIPE_CTL_TXSWING_ (0x00000040)
+#define PIPE_CTL_TXMARGIN_MASK_ (0x00000038)
+#define PIPE_CTL_TXDEEMPHASIS_MASK_ (0x00000006)
+#define PIPE_CTL_ELASTICITYBUFFERMODE_ (0x00000001)
+
+#define U1_LATENCY (0xA0)
+#define U2_LATENCY (0xA4)
+
+#define USB_STATUS (0x0A8)
+#define USB_STATUS_REMOTE_WK_ (0x00100000)
+#define USB_STATUS_FUNC_REMOTE_WK_ (0x00080000)
+#define USB_STATUS_LTM_ENABLE_ (0x00040000)
+#define USB_STATUS_U2_ENABLE_ (0x00020000)
+#define USB_STATUS_U1_ENABLE_ (0x00010000)
+#define USB_STATUS_SET_SEL_ (0x00000020)
+#define USB_STATUS_REMOTE_WK_STS_ (0x00000010)
+#define USB_STATUS_FUNC_REMOTE_WK_STS_ (0x00000008)
+#define USB_STATUS_LTM_ENABLE_STS_ (0x00000004)
+#define USB_STATUS_U2_ENABLE_STS_ (0x00000002)
+#define USB_STATUS_U1_ENABLE_STS_ (0x00000001)
+
+#define USB_CFG3 (0x0AC)
+#define USB_CFG3_EN_U2_LTM_ (0x40000000)
+#define USB_CFG3_BULK_OUT_NUMP_OVR_ (0x20000000)
+#define USB_CFG3_DIS_FAST_U1_EXIT_ (0x10000000)
+#define USB_CFG3_LPM_NYET_THR_ (0x0F000000)
+#define USB_CFG3_RX_DET_2_POL_LFPS_ (0x00800000)
+#define USB_CFG3_LFPS_FILT_ (0x00400000)
+#define USB_CFG3_SKIP_RX_DET_ (0x00200000)
+#define USB_CFG3_DELAY_P1P2P3_ (0x001C0000)
+#define USB_CFG3_DELAY_PHY_PWR_CHG_ (0x00020000)
+#define USB_CFG3_U1U2_EXIT_FR_ (0x00010000)
+#define USB_CFG3_REQ_P1P2P3 (0x00008000)
+#define USB_CFG3_HST_PRT_CMPL_ (0x00004000)
+#define USB_CFG3_DIS_SCRAMB_ (0x00002000)
+#define USB_CFG3_PWR_DN_SCALE_ (0x00001FFF)
+
+#define RFE_CTL (0x0B0)
+#define RFE_CTL_IGMP_COE_ (0x00004000)
+#define RFE_CTL_ICMP_COE_ (0x00002000)
+#define RFE_CTL_TCPUDP_COE_ (0x00001000)
+#define RFE_CTL_IP_COE_ (0x00000800)
+#define RFE_CTL_BCAST_EN_ (0x00000400)
+#define RFE_CTL_MCAST_EN_ (0x00000200)
+#define RFE_CTL_UCAST_EN_ (0x00000100)
+#define RFE_CTL_VLAN_STRIP_ (0x00000080)
+#define RFE_CTL_DISCARD_UNTAGGED_ (0x00000040)
+#define RFE_CTL_VLAN_FILTER_ (0x00000020)
+#define RFE_CTL_SA_FILTER_ (0x00000010)
+#define RFE_CTL_MCAST_HASH_ (0x00000008)
+#define RFE_CTL_DA_HASH_ (0x00000004)
+#define RFE_CTL_DA_PERFECT_ (0x00000002)
+#define RFE_CTL_RST_ (0x00000001)
+
+#define VLAN_TYPE (0x0B4)
+#define VLAN_TYPE_MASK_ (0x0000FFFF)
+
+#define FCT_RX_CTL (0x0C0)
+#define FCT_RX_CTL_EN_ (0x80000000)
+#define FCT_RX_CTL_RST_ (0x40000000)
+#define FCT_RX_CTL_SBF_ (0x02000000)
+#define FCT_RX_CTL_OVFL_ (0x01000000)
+#define FCT_RX_CTL_DROP_ (0x00800000)
+#define FCT_RX_CTL_NOT_EMPTY_ (0x00400000)
+#define FCT_RX_CTL_EMPTY_ (0x00200000)
+#define FCT_RX_CTL_DIS_ (0x00100000)
+#define FCT_RX_CTL_USED_MASK_ (0x0000FFFF)
+
+#define FCT_TX_CTL (0x0C4)
+#define FCT_TX_CTL_EN_ (0x80000000)
+#define FCT_TX_CTL_RST_ (0x40000000)
+#define FCT_TX_CTL_NOT_EMPTY_ (0x00400000)
+#define FCT_TX_CTL_EMPTY_ (0x00200000)
+#define FCT_TX_CTL_DIS_ (0x00100000)
+#define FCT_TX_CTL_USED_MASK_ (0x0000FFFF)
+
+#define FCT_RX_FIFO_END (0x0C8)
+#define FCT_RX_FIFO_END_MASK_ (0x0000007F)
+
+#define FCT_TX_FIFO_END (0x0CC)
+#define FCT_TX_FIFO_END_MASK_ (0x0000003F)
+
+#define FCT_FLOW (0x0D0)
+#define FCT_FLOW_OFF_MASK_ (0x00007F00)
+#define FCT_FLOW_ON_MASK_ (0x0000007F)
+
+#define RX_DP_STOR (0x0D4)
+#define RX_DP_STORE_TOT_RXUSED_MASK_ (0xFFFF0000)
+#define RX_DP_STORE_UTX_RXUSED_MASK_ (0x0000FFFF)
+
+#define TX_DP_STOR (0x0D8)
+#define TX_DP_STORE_TOT_TXUSED_MASK_ (0xFFFF0000)
+#define TX_DP_STORE_URX_TXUSED_MASK_ (0x0000FFFF)
+
+#define LTM_BELT_IDLE0 (0x0E0)
+#define LTM_BELT_IDLE0_IDLE1000_ (0x0FFF0000)
+#define LTM_BELT_IDLE0_IDLE100_ (0x00000FFF)
+
+#define LTM_BELT_IDLE1 (0x0E4)
+#define LTM_BELT_IDLE1_IDLE10_ (0x00000FFF)
+
+#define LTM_BELT_ACT0 (0x0E8)
+#define LTM_BELT_ACT0_ACT1000_ (0x0FFF0000)
+#define LTM_BELT_ACT0_ACT100_ (0x00000FFF)
+
+#define LTM_BELT_ACT1 (0x0EC)
+#define LTM_BELT_ACT1_ACT10_ (0x00000FFF)
+
+#define LTM_INACTIVE0 (0x0F0)
+#define LTM_INACTIVE0_TIMER1000_ (0xFFFF0000)
+#define LTM_INACTIVE0_TIMER100_ (0x0000FFFF)
+
+#define LTM_INACTIVE1 (0x0F4)
+#define LTM_INACTIVE1_TIMER10_ (0x0000FFFF)
+
+#define MAC_CR (0x100)
+#define MAC_CR_GMII_EN_ (0x00080000)
+#define MAC_CR_EEE_TX_CLK_STOP_EN_ (0x00040000)
+#define MAC_CR_EEE_EN_ (0x00020000)
+#define MAC_CR_EEE_TLAR_EN_ (0x00010000)
+#define MAC_CR_ADP_ (0x00002000)
+#define MAC_CR_AUTO_DUPLEX_ (0x00001000)
+#define MAC_CR_AUTO_SPEED_ (0x00000800)
+#define MAC_CR_LOOPBACK_ (0x00000400)
+#define MAC_CR_BOLMT_MASK_ (0x000000C0)
+#define MAC_CR_FULL_DUPLEX_ (0x00000008)
+#define MAC_CR_SPEED_MASK_ (0x00000006)
+#define MAC_CR_SPEED_1000_ (0x00000004)
+#define MAC_CR_SPEED_100_ (0x00000002)
+#define MAC_CR_SPEED_10_ (0x00000000)
+#define MAC_CR_RST_ (0x00000001)
+
+#define MAC_RX (0x104)
+#define MAC_RX_MAX_SIZE_SHIFT_ (16)
+#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000)
+#define MAC_RX_FCS_STRIP_ (0x00000010)
+#define MAC_RX_VLAN_FSE_ (0x00000004)
+#define MAC_RX_RXD_ (0x00000002)
+#define MAC_RX_RXEN_ (0x00000001)
+
+#define MAC_TX (0x108)
+#define MAC_TX_BAD_FCS_ (0x00000004)
+#define MAC_TX_TXD_ (0x00000002)
+#define MAC_TX_TXEN_ (0x00000001)
+
+#define FLOW (0x10C)
+#define FLOW_CR_FORCE_FC_ (0x80000000)
+#define FLOW_CR_TX_FCEN_ (0x40000000)
+#define FLOW_CR_RX_FCEN_ (0x20000000)
+#define FLOW_CR_FPF_ (0x10000000)
+#define FLOW_CR_FCPT_MASK_ (0x0000FFFF)
+
+#define RAND_SEED (0x110)
+#define RAND_SEED_MASK_ (0x0000FFFF)
+
+#define ERR_STS (0x114)
+#define ERR_STS_FERR_ (0x00000100)
+#define ERR_STS_LERR_ (0x00000080)
+#define ERR_STS_RFERR_ (0x00000040)
+#define ERR_STS_ECERR_ (0x00000010)
+#define ERR_STS_ALERR_ (0x00000008)
+#define ERR_STS_URERR_ (0x00000004)
+
+#define RX_ADDRH (0x118)
+#define RX_ADDRH_MASK_ (0x0000FFFF)
+
+#define RX_ADDRL (0x11C)
+#define RX_ADDRL_MASK_ (0xFFFFFFFF)
+
+#define MII_ACC (0x120)
+#define MII_ACC_PHY_ADDR_SHIFT_ (11)
+#define MII_ACC_PHY_ADDR_MASK_ (0x0000F800)
+#define MII_ACC_MIIRINDA_SHIFT_ (6)
+#define MII_ACC_MIIRINDA_MASK_ (0x000007C0)
+#define MII_ACC_MII_READ_ (0x00000000)
+#define MII_ACC_MII_WRITE_ (0x00000002)
+#define MII_ACC_MII_BUSY_ (0x00000001)
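+/* Illustrative sketch of an MII read, assuming the usual busy-wait
+ * protocol:
+ *   val = (phy_id << MII_ACC_PHY_ADDR_SHIFT_) |
+ *         (idx << MII_ACC_MIIRINDA_SHIFT_) |
+ *         MII_ACC_MII_READ_ | MII_ACC_MII_BUSY_;
+ * write val to MII_ACC, poll until MII_ACC_MII_BUSY_ clears, then read
+ * the result from MII_DATA below.
+ */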
+
+#define MII_DATA (0x124)
+#define MII_DATA_MASK_ (0x0000FFFF)
+
+#define MAC_RGMII_ID (0x128)
+#define MAC_RGMII_ID_TXC_DELAY_EN_ (0x00000002)
+#define MAC_RGMII_ID_RXC_DELAY_EN_ (0x00000001)
+
+#define EEE_TX_LPI_REQ_DLY (0x130)
+#define EEE_TX_LPI_REQ_DLY_CNT_MASK_ (0xFFFFFFFF)
+
+#define EEE_TW_TX_SYS (0x134)
+#define EEE_TW_TX_SYS_CNT1G_MASK_ (0xFFFF0000)
+#define EEE_TW_TX_SYS_CNT100M_MASK_ (0x0000FFFF)
+
+#define EEE_TX_LPI_REM_DLY (0x138)
+#define EEE_TX_LPI_REM_DLY_CNT_ (0x00FFFFFF)
+
+#define WUCSR (0x140)
+#define WUCSR_TESTMODE_ (0x80000000)
+#define WUCSR_RFE_WAKE_EN_ (0x00004000)
+#define WUCSR_EEE_TX_WAKE_ (0x00002000)
+#define WUCSR_EEE_TX_WAKE_EN_ (0x00001000)
+#define WUCSR_EEE_RX_WAKE_ (0x00000800)
+#define WUCSR_EEE_RX_WAKE_EN_ (0x00000400)
+#define WUCSR_RFE_WAKE_FR_ (0x00000200)
+#define WUCSR_STORE_WAKE_ (0x00000100)
+#define WUCSR_PFDA_FR_ (0x00000080)
+#define WUCSR_WUFR_ (0x00000040)
+#define WUCSR_MPR_ (0x00000020)
+#define WUCSR_BCST_FR_ (0x00000010)
+#define WUCSR_PFDA_EN_ (0x00000008)
+#define WUCSR_WAKE_EN_ (0x00000004)
+#define WUCSR_MPEN_ (0x00000002)
+#define WUCSR_BCST_EN_ (0x00000001)
+
+#define WK_SRC (0x144)
+#define WK_SRC_GPIOX_INT_WK_SHIFT_ (20)
+#define WK_SRC_GPIOX_INT_WK_MASK_ (0xFFF00000)
+#define WK_SRC_IPV6_TCPSYN_RCD_WK_ (0x00010000)
+#define WK_SRC_IPV4_TCPSYN_RCD_WK_ (0x00008000)
+#define WK_SRC_EEE_TX_WK_ (0x00004000)
+#define WK_SRC_EEE_RX_WK_ (0x00002000)
+#define WK_SRC_GOOD_FR_WK_ (0x00001000)
+#define WK_SRC_PFDA_FR_WK_ (0x00000800)
+#define WK_SRC_MP_FR_WK_ (0x00000400)
+#define WK_SRC_BCAST_FR_WK_ (0x00000200)
+#define WK_SRC_WU_FR_WK_ (0x00000100)
+#define WK_SRC_WUFF_MATCH_MASK_ (0x0000001F)
+
+#define WUF_CFG0 (0x150)
+#define NUM_OF_WUF_CFG (32)
+#define WUF_CFG_BEGIN (WUF_CFG0)
+#define WUF_CFG(index) (WUF_CFG_BEGIN + (4 * (index)))
+#define WUF_CFGX_EN_ (0x80000000)
+#define WUF_CFGX_TYPE_MASK_ (0x03000000)
+#define WUF_CFGX_TYPE_MCAST_ (0x02000000)
+#define WUF_CFGX_TYPE_ALL_ (0x01000000)
+#define WUF_CFGX_TYPE_UCAST_ (0x00000000)
+#define WUF_CFGX_OFFSET_SHIFT_ (16)
+#define WUF_CFGX_OFFSET_MASK_ (0x00FF0000)
+#define WUF_CFGX_CRC16_MASK_ (0x0000FFFF)
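+/* Each of the NUM_OF_WUF_CFG filters has one 32-bit config word at a
+ * 4-byte stride, so e.g. WUF_CFG(2) == 0x158 (illustrative arithmetic).
+ */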
+
+#define WUF_MASK0_0 (0x200)
+#define WUF_MASK0_1 (0x204)
+#define WUF_MASK0_2 (0x208)
+#define WUF_MASK0_3 (0x20C)
+#define NUM_OF_WUF_MASK (32)
+#define WUF_MASK0_BEGIN (WUF_MASK0_0)
+#define WUF_MASK1_BEGIN (WUF_MASK0_1)
+#define WUF_MASK2_BEGIN (WUF_MASK0_2)
+#define WUF_MASK3_BEGIN (WUF_MASK0_3)
+#define WUF_MASK0(index) (WUF_MASK0_BEGIN + (0x10 * (index)))
+#define WUF_MASK1(index) (WUF_MASK1_BEGIN + (0x10 * (index)))
+#define WUF_MASK2(index) (WUF_MASK2_BEGIN + (0x10 * (index)))
+#define WUF_MASK3(index) (WUF_MASK3_BEGIN + (0x10 * (index)))
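+/* Each filter's 128-bit byte mask is four consecutive words at a
+ * 16-byte stride, e.g. WUF_MASK0(1) == 0x210 and WUF_MASK3(1) == 0x21C
+ * (illustrative arithmetic).
+ */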
+
+#define MAF_BASE (0x400)
+#define MAF_HIX (0x00)
+#define MAF_LOX (0x04)
+#define NUM_OF_MAF (33)
+#define MAF_HI_BEGIN (MAF_BASE + MAF_HIX)
+#define MAF_LO_BEGIN (MAF_BASE + MAF_LOX)
+#define MAF_HI(index) (MAF_BASE + (8 * (index)) + (MAF_HIX))
+#define MAF_LO(index) (MAF_BASE + (8 * (index)) + (MAF_LOX))
+#define MAF_HI_VALID_ (0x80000000)
+#define MAF_HI_TYPE_MASK_ (0x40000000)
+#define MAF_HI_TYPE_SRC_ (0x40000000)
+#define MAF_HI_TYPE_DST_ (0x00000000)
+#define MAF_HI_ADDR_MASK (0x0000FFFF)
+#define MAF_LO_ADDR_MASK (0xFFFFFFFF)
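+/* MAC address filter slots are 8 bytes apart, e.g. MAF_HI(1) == 0x408
+ * and MAF_LO(1) == 0x40C (illustrative arithmetic); whether slot 0 is
+ * reserved for the device's own address is an assumption.
+ */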
+
+#define WUCSR2 (0x600)
+#define WUCSR2_CSUM_DISABLE_ (0x80000000)
+#define WUCSR2_NA_SA_SEL_ (0x00000100)
+#define WUCSR2_NS_RCD_ (0x00000080)
+#define WUCSR2_ARP_RCD_ (0x00000040)
+#define WUCSR2_IPV6_TCPSYN_RCD_ (0x00000020)
+#define WUCSR2_IPV4_TCPSYN_RCD_ (0x00000010)
+#define WUCSR2_NS_OFFLOAD_EN_ (0x00000008)
+#define WUCSR2_ARP_OFFLOAD_EN_ (0x00000004)
+#define WUCSR2_IPV6_TCPSYN_WAKE_EN_ (0x00000002)
+#define WUCSR2_IPV4_TCPSYN_WAKE_EN_ (0x00000001)
+
+#define NS1_IPV6_ADDR_DEST0 (0x610)
+#define NS1_IPV6_ADDR_DEST1 (0x614)
+#define NS1_IPV6_ADDR_DEST2 (0x618)
+#define NS1_IPV6_ADDR_DEST3 (0x61C)
+
+#define NS1_IPV6_ADDR_SRC0 (0x620)
+#define NS1_IPV6_ADDR_SRC1 (0x624)
+#define NS1_IPV6_ADDR_SRC2 (0x628)
+#define NS1_IPV6_ADDR_SRC3 (0x62C)
+
+#define NS1_ICMPV6_ADDR0_0 (0x630)
+#define NS1_ICMPV6_ADDR0_1 (0x634)
+#define NS1_ICMPV6_ADDR0_2 (0x638)
+#define NS1_ICMPV6_ADDR0_3 (0x63C)
+
+#define NS1_ICMPV6_ADDR1_0 (0x640)
+#define NS1_ICMPV6_ADDR1_1 (0x644)
+#define NS1_ICMPV6_ADDR1_2 (0x648)
+#define NS1_ICMPV6_ADDR1_3 (0x64C)
+
+#define NS2_IPV6_ADDR_DEST0 (0x650)
+#define NS2_IPV6_ADDR_DEST1 (0x654)
+#define NS2_IPV6_ADDR_DEST2 (0x658)
+#define NS2_IPV6_ADDR_DEST3 (0x65C)
+
+#define NS2_IPV6_ADDR_SRC0 (0x660)
+#define NS2_IPV6_ADDR_SRC1 (0x664)
+#define NS2_IPV6_ADDR_SRC2 (0x668)
+#define NS2_IPV6_ADDR_SRC3 (0x66C)
+
+#define NS2_ICMPV6_ADDR0_0 (0x670)
+#define NS2_ICMPV6_ADDR0_1 (0x674)
+#define NS2_ICMPV6_ADDR0_2 (0x678)
+#define NS2_ICMPV6_ADDR0_3 (0x67C)
+
+#define NS2_ICMPV6_ADDR1_0 (0x680)
+#define NS2_ICMPV6_ADDR1_1 (0x684)
+#define NS2_ICMPV6_ADDR1_2 (0x688)
+#define NS2_ICMPV6_ADDR1_3 (0x68C)
+
+#define SYN_IPV4_ADDR_SRC (0x690)
+#define SYN_IPV4_ADDR_DEST (0x694)
+#define SYN_IPV4_TCP_PORTS (0x698)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_ (16)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_ (0xFFFF0000)
+#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_ (0x0000FFFF)
+
+#define SYN_IPV6_ADDR_SRC0 (0x69C)
+#define SYN_IPV6_ADDR_SRC1 (0x6A0)
+#define SYN_IPV6_ADDR_SRC2 (0x6A4)
+#define SYN_IPV6_ADDR_SRC3 (0x6A8)
+
+#define SYN_IPV6_ADDR_DEST0 (0x6AC)
+#define SYN_IPV6_ADDR_DEST1 (0x6B0)
+#define SYN_IPV6_ADDR_DEST2 (0x6B4)
+#define SYN_IPV6_ADDR_DEST3 (0x6B8)
+
+#define SYN_IPV6_TCP_PORTS (0x6BC)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_ (16)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_ (0xFFFF0000)
+#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_ (0x0000FFFF)
+
+#define ARP_SPA (0x6C0)
+#define ARP_TPA (0x6C4)
+
+#define PHY_DEV_ID (0x700)
+#define PHY_DEV_ID_REV_SHIFT_ (28)
+#define PHY_DEV_ID_REV_MASK_ (0xF0000000)
+#define PHY_DEV_ID_MODEL_SHIFT_ (22)
+#define PHY_DEV_ID_MODEL_MASK_ (0x0FC00000)
+#define PHY_DEV_ID_OUI_MASK_ (0x003FFFFF)
+
+#define OTP_BASE_ADDR (0x00001000)
+#define OTP_ADDR_RANGE_ (0x1FF)
+
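+/* The OTP block exposes byte-wide registers at a 4-byte stride from
+ * OTP_BASE_ADDR, hence the "+ 4 * n" pattern below; e.g. OTP_ADDR1 sits
+ * at 0x1004 (illustrative arithmetic, not from a datasheet).
+ */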
+#define OTP_PWR_DN (OTP_BASE_ADDR + 4 * 0x00)
+#define OTP_PWR_DN_PWRDN_N_ (0x01)
+
+#define OTP_ADDR1 (OTP_BASE_ADDR + 4 * 0x01)
+#define OTP_ADDR1_15_11 (0x1F)
+
+#define OTP_ADDR2 (OTP_BASE_ADDR + 4 * 0x02)
+#define OTP_ADDR2_10_3 (0xFF)
+
+#define OTP_ADDR3 (OTP_BASE_ADDR + 4 * 0x03)
+#define OTP_ADDR3_2_0 (0x03)
+
+#define OTP_PRGM_DATA (OTP_BASE_ADDR + 4 * 0x04)
+
+#define OTP_PRGM_MODE (OTP_BASE_ADDR + 4 * 0x05)
+#define OTP_PRGM_MODE_BYTE_ (0x01)
+
+#define OTP_RD_DATA (OTP_BASE_ADDR + 4 * 0x06)
+
+#define OTP_FUNC_CMD (OTP_BASE_ADDR + 4 * 0x08)
+#define OTP_FUNC_CMD_RESET_ (0x04)
+#define OTP_FUNC_CMD_PROGRAM_ (0x02)
+#define OTP_FUNC_CMD_READ_ (0x01)
+
+#define OTP_TST_CMD (OTP_BASE_ADDR + 4 * 0x09)
+#define OTP_TST_CMD_TEST_DEC_SEL_ (0x10)
+#define OTP_TST_CMD_PRGVRFY_ (0x08)
+#define OTP_TST_CMD_WRTEST_ (0x04)
+#define OTP_TST_CMD_TESTDEC_ (0x02)
+#define OTP_TST_CMD_BLANKCHECK_ (0x01)
+
+#define OTP_CMD_GO (OTP_BASE_ADDR + 4 * 0x0A)
+#define OTP_CMD_GO_GO_ (0x01)
+
+#define OTP_PASS_FAIL (OTP_BASE_ADDR + 4 * 0x0B)
+#define OTP_PASS_FAIL_PASS_ (0x02)
+#define OTP_PASS_FAIL_FAIL_ (0x01)
+
+#define OTP_STATUS (OTP_BASE_ADDR + 4 * 0x0C)
+#define OTP_STATUS_OTP_LOCK_ (0x10)
+#define OTP_STATUS_WEB_ (0x08)
+#define OTP_STATUS_PGMEN (0x04)
+#define OTP_STATUS_CPUMPEN_ (0x02)
+#define OTP_STATUS_BUSY_ (0x01)
+
+#define OTP_MAX_PRG (OTP_BASE_ADDR + 4 * 0x0D)
+#define OTP_MAX_PRG_MAX_PROG (0x1F)
+
+#define OTP_INTR_STATUS (OTP_BASE_ADDR + 4 * 0x10)
+#define OTP_INTR_STATUS_READY_ (0x01)
+
+#define OTP_INTR_MASK (OTP_BASE_ADDR + 4 * 0x11)
+#define OTP_INTR_MASK_READY_ (0x01)
+
+#define OTP_RSTB_PW1 (OTP_BASE_ADDR + 4 * 0x14)
+#define OTP_RSTB_PW2 (OTP_BASE_ADDR + 4 * 0x15)
+#define OTP_PGM_PW1 (OTP_BASE_ADDR + 4 * 0x18)
+#define OTP_PGM_PW2 (OTP_BASE_ADDR + 4 * 0x19)
+#define OTP_READ_PW1 (OTP_BASE_ADDR + 4 * 0x1C)
+#define OTP_READ_PW2 (OTP_BASE_ADDR + 4 * 0x1D)
+#define OTP_TCRST (OTP_BASE_ADDR + 4 * 0x20)
+#define OTP_RSRD (OTP_BASE_ADDR + 4 * 0x21)
+#define OTP_TREADEN_VAL (OTP_BASE_ADDR + 4 * 0x22)
+#define OTP_TDLES_VAL (OTP_BASE_ADDR + 4 * 0x23)
+#define OTP_TWWL_VAL (OTP_BASE_ADDR + 4 * 0x24)
+#define OTP_TDLEH_VAL (OTP_BASE_ADDR + 4 * 0x25)
+#define OTP_TWPED_VAL (OTP_BASE_ADDR + 4 * 0x26)
+#define OTP_TPES_VAL (OTP_BASE_ADDR + 4 * 0x27)
+#define OTP_TCPS_VAL (OTP_BASE_ADDR + 4 * 0x28)
+#define OTP_TCPH_VAL (OTP_BASE_ADDR + 4 * 0x29)
+#define OTP_TPGMVFY_VAL (OTP_BASE_ADDR + 4 * 0x2A)
+#define OTP_TPEH_VAL (OTP_BASE_ADDR + 4 * 0x2B)
+#define OTP_TPGRST_VAL (OTP_BASE_ADDR + 4 * 0x2C)
+#define OTP_TCLES_VAL (OTP_BASE_ADDR + 4 * 0x2D)
+#define OTP_TCLEH_VAL (OTP_BASE_ADDR + 4 * 0x2E)
+#define OTP_TRDES_VAL (OTP_BASE_ADDR + 4 * 0x2F)
+#define OTP_TBCACC_VAL (OTP_BASE_ADDR + 4 * 0x30)
+#define OTP_TAAC_VAL (OTP_BASE_ADDR + 4 * 0x31)
+#define OTP_TACCT_VAL (OTP_BASE_ADDR + 4 * 0x32)
+#define OTP_TRDEP_VAL (OTP_BASE_ADDR + 4 * 0x38)
+#define OTP_TPGSV_VAL (OTP_BASE_ADDR + 4 * 0x39)
+#define OTP_TPVSR_VAL (OTP_BASE_ADDR + 4 * 0x3A)
+#define OTP_TPVHR_VAL (OTP_BASE_ADDR + 4 * 0x3B)
+#define OTP_TPVSA_VAL (OTP_BASE_ADDR + 4 * 0x3C)
+
+#define PHY_ID1 (0x02)
+#define PHY_ID2 (0x03)
+
+#define PHY_DEV_ID_OUI_VTSE (0x04001C)
+#define PHY_DEV_ID_MODEL_VTSE_8502 (0x23)
+
+#define PHY_AUTONEG_ADV (0x04)
+#define NWAY_AR_NEXT_PAGE_ (0x8000)
+#define NWAY_AR_REMOTE_FAULT_ (0x2000)
+#define NWAY_AR_ASM_DIR_ (0x0800)
+#define NWAY_AR_PAUSE_ (0x0400)
+#define NWAY_AR_100T4_CAPS_ (0x0200)
+#define NWAY_AR_100TX_FD_CAPS_ (0x0100)
+#define NWAY_AR_SELECTOR_FIELD_ (0x001F)
+#define NWAY_AR_100TX_HD_CAPS_ (0x0080)
+#define NWAY_AR_10T_FD_CAPS_ (0x0040)
+#define NWAY_AR_10T_HD_CAPS_ (0x0020)
+#define NWAY_AR_ALL_CAPS_ (NWAY_AR_10T_HD_CAPS_ | \
+ NWAY_AR_10T_FD_CAPS_ | \
+ NWAY_AR_100TX_HD_CAPS_ | \
+ NWAY_AR_100TX_FD_CAPS_)
+#define NWAY_AR_PAUSE_MASK (NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_)
+
+#define PHY_LP_ABILITY (0x05)
+#define NWAY_LPAR_NEXT_PAGE_ (0x8000)
+#define NWAY_LPAR_ACKNOWLEDGE_ (0x4000)
+#define NWAY_LPAR_REMOTE_FAULT_ (0x2000)
+#define NWAY_LPAR_ASM_DIR_ (0x0800)
+#define NWAY_LPAR_PAUSE_ (0x0400)
+#define NWAY_LPAR_100T4_CAPS_ (0x0200)
+#define NWAY_LPAR_100TX_FD_CAPS_ (0x0100)
+#define NWAY_LPAR_100TX_HD_CAPS_ (0x0080)
+#define NWAY_LPAR_10T_FD_CAPS_ (0x0040)
+#define NWAY_LPAR_10T_HD_CAPS_ (0x0020)
+#define NWAY_LPAR_SELECTOR_FIELD_ (0x001F)
+
+#define PHY_AUTONEG_EXP (0x06)
+#define NWAY_ER_PAR_DETECT_FAULT_ (0x0010)
+#define NWAY_ER_LP_NEXT_PAGE_CAPS_ (0x0008)
+#define NWAY_ER_NEXT_PAGE_CAPS_ (0x0004)
+#define NWAY_ER_PAGE_RXD_ (0x0002)
+#define NWAY_ER_LP_NWAY_CAPS_ (0x0001)
+
+#define PHY_NEXT_PAGE_TX (0x07)
+#define NPTX_NEXT_PAGE_ (0x8000)
+#define NPTX_MSG_PAGE_ (0x2000)
+#define NPTX_ACKNOWLDGE2_ (0x1000)
+#define NPTX_TOGGLE_ (0x0800)
+#define NPTX_MSG_CODE_FIELD_ (0x0001)
+
+#define PHY_LP_NEXT_PAGE (0x08)
+#define LP_RNPR_NEXT_PAGE_ (0x8000)
+#define LP_RNPR_ACKNOWLDGE_ (0x4000)
+#define LP_RNPR_MSG_PAGE_ (0x2000)
+#define LP_RNPR_ACKNOWLDGE2_ (0x1000)
+#define LP_RNPR_TOGGLE_ (0x0800)
+#define LP_RNPR_MSG_CODE_FIELD_ (0x0001)
+
+#define PHY_1000T_CTRL (0x09)
+#define CR_1000T_TEST_MODE_4_ (0x8000)
+#define CR_1000T_TEST_MODE_3_ (0x6000)
+#define CR_1000T_TEST_MODE_2_ (0x4000)
+#define CR_1000T_TEST_MODE_1_ (0x2000)
+#define CR_1000T_MS_ENABLE_ (0x1000)
+#define CR_1000T_MS_VALUE_ (0x0800)
+#define CR_1000T_REPEATER_DTE_ (0x0400)
+#define CR_1000T_FD_CAPS_ (0x0200)
+#define CR_1000T_HD_CAPS_ (0x0100)
+#define CR_1000T_ASYM_PAUSE_ (0x0080)
+#define CR_1000T_TEST_MODE_NORMAL_ (0x0000)
+
+#define PHY_1000T_STATUS (0x0A)
+#define SR_1000T_MS_CONFIG_FAULT_ (0x8000)
+#define SR_1000T_MS_CONFIG_RES_ (0x4000)
+#define SR_1000T_LOCAL_RX_STATUS_ (0x2000)
+#define SR_1000T_REMOTE_RX_STATUS_ (0x1000)
+#define SR_1000T_LP_FD_CAPS_ (0x0800)
+#define SR_1000T_LP_HD_CAPS_ (0x0400)
+#define SR_1000T_ASYM_PAUSE_DIR_ (0x0100)
+#define SR_1000T_IDLE_ERROR_CNT_ (0x00FF)
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+#define PHY_EXT_STATUS (0x0F)
+#define IEEE_ESR_1000X_FD_CAPS_ (0x8000)
+#define IEEE_ESR_1000X_HD_CAPS_ (0x4000)
+#define IEEE_ESR_1000T_FD_CAPS_ (0x2000)
+#define IEEE_ESR_1000T_HD_CAPS_ (0x1000)
+#define PHY_TX_POLARITY_MASK_ (0x0100)
+#define PHY_TX_NORMAL_POLARITY_ (0x0000)
+#define AUTO_POLARITY_DISABLE_ (0x0010)
+
+#define PHY_MMD_CTL (0x0D)
+#define PHY_MMD_CTRL_OP_MASK_ (0xC000)
+#define PHY_MMD_CTRL_OP_REG_ (0x0000)
+#define PHY_MMD_CTRL_OP_DNI_ (0x4000)
+#define PHY_MMD_CTRL_OP_DPIRW_ (0x8000)
+#define PHY_MMD_CTRL_OP_DPIWO_ (0xC000)
+#define PHY_MMD_CTRL_DEV_ADDR_MASK_ (0x001F)
+
+#define PHY_MMD_REG_DATA (0x0E)
+
+/* VTSE Vendor Specific registers */
+#define PHY_VTSE_BYPASS (0x12)
+#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_ (0x0020)
+
+#define PHY_VTSE_INT_MASK (0x19)
+#define PHY_VTSE_INT_MASK_MDINTPIN_EN_ (0x8000)
+#define PHY_VTSE_INT_MASK_SPEED_CHANGE_ (0x4000)
+#define PHY_VTSE_INT_MASK_LINK_CHANGE_ (0x2000)
+#define PHY_VTSE_INT_MASK_FDX_CHANGE_ (0x1000)
+#define PHY_VTSE_INT_MASK_AUTONEG_ERR_ (0x0800)
+#define PHY_VTSE_INT_MASK_AUTONEG_DONE_ (0x0400)
+#define PHY_VTSE_INT_MASK_POE_DETECT_ (0x0200)
+#define PHY_VTSE_INT_MASK_SYMBOL_ERR_ (0x0100)
+#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_ (0x0080)
+#define PHY_VTSE_INT_MASK_WOL_EVENT_ (0x0040)
+#define PHY_VTSE_INT_MASK_EXTENDED_INT_ (0x0020)
+#define PHY_VTSE_INT_MASK_RESERVED_ (0x0010)
+#define PHY_VTSE_INT_MASK_FALSE_CARRIER_ (0x0008)
+#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_ (0x0004)
+#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
+#define PHY_VTSE_INT_MASK_RX_ER_ (0x0001)
+
+#define PHY_VTSE_INT_STS (0x1A)
+#define PHY_VTSE_INT_STS_INT_ACTIVE_ (0x8000)
+#define PHY_VTSE_INT_STS_SPEED_CHANGE_ (0x4000)
+#define PHY_VTSE_INT_STS_LINK_CHANGE_ (0x2000)
+#define PHY_VTSE_INT_STS_FDX_CHANGE_ (0x1000)
+#define PHY_VTSE_INT_STS_AUTONEG_ERR_ (0x0800)
+#define PHY_VTSE_INT_STS_AUTONEG_DONE_ (0x0400)
+#define PHY_VTSE_INT_STS_POE_DETECT_ (0x0200)
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_ (0x0100)
+#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_ (0x0080)
+#define PHY_VTSE_INT_STS_WOL_EVENT_ (0x0040)
+#define PHY_VTSE_INT_STS_EXTENDED_INT_ (0x0020)
+#define PHY_VTSE_INT_STS_RESERVED_ (0x0010)
+#define PHY_VTSE_INT_STS_FALSE_CARRIER_ (0x0008)
+#define PHY_VTSE_INT_STS_LINK_SPEED_DS_ (0x0004)
+#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
+#define PHY_VTSE_INT_STS_RX_ER_ (0x0001)
+
+/* VTSE PHY registers */
+#define PHY_EXT_GPIO_PAGE (0x1F)
+#define PHY_EXT_GPIO_PAGE_SPACE_0 (0x0000)
+#define PHY_EXT_GPIO_PAGE_SPACE_1 (0x0001)
+#define PHY_EXT_GPIO_PAGE_SPACE_2 (0x0002)
+
+/* Extended Register Page 1 space */
+#define PHY_EXT_MODE_CTRL (0x13)
+#define PHY_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
+#define PHY_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
+#define PHY_EXT_MODE_CTRL_MDI_ (0x0008)
+#define PHY_EXT_MODE_CTRL_MDI_X_ (0x000C)
+
+#define PHY_ANA_10BASE_T_HD 0x01
+#define PHY_ANA_10BASE_T_FD 0x02
+#define PHY_ANA_100BASE_TX_HD 0x04
+#define PHY_ANA_100BASE_TX_FD 0x08
+#define PHY_ANA_1000BASE_T_FD 0x10
+#define PHY_ANA_ALL_SUPPORTED_MEDIA (PHY_ANA_10BASE_T_HD | \
+ PHY_ANA_10BASE_T_FD | \
+ PHY_ANA_100BASE_TX_HD | \
+ PHY_ANA_100BASE_TX_FD | \
+ PHY_ANA_1000BASE_T_FD)
+/* PHY MMD registers */
+#define PHY_MMD_DEV_3 3
+
+#define PHY_EEE_PCS_STATUS (0x1)
+#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_ ((u16)0x0800)
+#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_ ((u16)0x0400)
+#define PHY_EEE_PCS_STATUS_TX_LPI_IND_ ((u16)0x0200)
+#define PHY_EEE_PCS_STATUS_RX_LPI_IND_ ((u16)0x0100)
+#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_ ((u16)0x0004)
+
+#define PHY_EEE_CAPABILITIES (0x14)
+#define PHY_EEE_CAPABILITIES_1000BT_EEE_ ((u16)0x0004)
+#define PHY_EEE_CAPABILITIES_100BT_EEE_ ((u16)0x0002)
+
+#define PHY_MMD_DEV_7 7
+
+#define PHY_EEE_ADVERTISEMENT (0x3C)
+#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_ ((u16)0x0004)
+#define PHY_EEE_ADVERTISEMENT_100BT_EEE_ ((u16)0x0002)
+
+#define PHY_EEE_LP_ADVERTISEMENT (0x3D)
+#define PHY_EEE_1000BT_EEE_CAPABLE_ ((u16)0x0004)
+#define PHY_EEE_100BT_EEE_CAPABLE_ ((u16)0x0002)
+#endif /* _LAN78XX_H */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9d43460ce3c7..1f7a7cd97e50 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ad8cbc6c9ee7..fe4ec324aebc 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -339,6 +339,7 @@
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
+#define RX_ZERO_EN 0x0080
/* USB_U2P3_CTRL */
#define U2P3_ENABLE 0x0001
@@ -622,6 +623,7 @@ enum rtl_version {
RTL_VER_03,
RTL_VER_04,
RTL_VER_05,
+ RTL_VER_06,
RTL_VER_MAX
};
@@ -2610,7 +2612,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
u32 ocp_data;
u16 data;
- ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+ tp->version == RTL_VER_05)
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
data = r8152_mdio_read(tp, MII_BMCR);
if (data & BMCR_PDOWN) {
data &= ~BMCR_PDOWN;
@@ -2711,7 +2716,7 @@ static void r8153_first_init(struct r8152 *tp)
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data &= ~RX_AGG_DISABLE;
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -3241,7 +3246,7 @@ static void r8152b_init(struct r8152 *tp)
/* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data &= ~RX_AGG_DISABLE;
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -3293,6 +3298,13 @@ static void r8153_init(struct r8152 *tp)
else
ocp_data |= DYNAMIC_BURST;
ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+ } else if (tp->version == RTL_VER_06) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
+ ocp_data &= ~DYNAMIC_BURST;
+ else
+ ocp_data |= DYNAMIC_BURST;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
}
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -3988,6 +4000,10 @@ static void r8152b_get_version(struct r8152 *tp)
tp->version = RTL_VER_05;
tp->mii.supports_gmii = 1;
break;
+ case 0x5c30:
+ tp->version = RTL_VER_06;
+ tp->mii.supports_gmii = 1;
+ break;
default:
netif_info(tp, probe, tp->netdev,
"Unknown version 0x%04x\n", version);
@@ -4033,6 +4049,7 @@ static int rtl_ops_init(struct r8152 *tp)
case RTL_VER_03:
case RTL_VER_04:
case RTL_VER_05:
+ case RTL_VER_06:
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8153_disable;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c8186ffda1a3..343592c4315f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -290,6 +290,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_poll_controller = veth_poll_controller,
#endif
.ndo_get_iflink = veth_get_iflink,
+ .ndo_features_check = passthru_features_check,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 237f8e5e493d..546b669fbfdd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -518,7 +518,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
skb_mark_napi_id(skb, &rq->napi);
- netif_receive_skb(skb);
+ napi_gro_receive(&rq->napi, skb);
return;
frame_err:
@@ -756,7 +756,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */
if (received < budget) {
r = virtqueue_enable_cb_prepare(rq->vq);
- napi_complete(napi);
+ napi_complete_done(napi, received);
if (unlikely(virtqueue_poll(rq->vq, r)) &&
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 34c519eb1db5..06c0731ae619 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -49,15 +49,12 @@
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
+#include <net/dst_metadata.h>
#define VXLAN_VERSION "0.1"
#define PORT_HASH_BITS 8
#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
-#define VNI_HASH_BITS 10
-#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS 8
-#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
@@ -74,9 +71,13 @@ module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static int vxlan_net_id;
+static struct rtnl_link_ops vxlan_link_ops;
static const u8 all_zeros_mac[ETH_ALEN];
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ bool no_share, u32 flags);
+
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
@@ -84,21 +85,6 @@ struct vxlan_net {
spinlock_t sock_lock;
};
-union vxlan_addr {
- struct sockaddr_in sin;
- struct sockaddr_in6 sin6;
- struct sockaddr sa;
-};
-
-struct vxlan_rdst {
- union vxlan_addr remote_ip;
- __be16 remote_port;
- u32 remote_vni;
- u32 remote_ifindex;
- struct list_head list;
- struct rcu_head rcu;
-};
-
/* Forwarding table entry */
struct vxlan_fdb {
struct hlist_node hlist; /* linked list of entries */
@@ -106,40 +92,21 @@ struct vxlan_fdb {
unsigned long updated; /* jiffies */
unsigned long used;
struct list_head remotes;
+ u8 eth_addr[ETH_ALEN];
u16 state; /* see ndm_state */
u8 flags; /* see ndm_flags */
- u8 eth_addr[ETH_ALEN];
-};
-
-/* Pseudo network device */
-struct vxlan_dev {
- struct hlist_node hlist; /* vni hash table */
- struct list_head next; /* vxlan's per namespace list */
- struct vxlan_sock *vn_sock; /* listening socket */
- struct net_device *dev;
- struct net *net; /* netns for packet i/o */
- struct vxlan_rdst default_dst; /* default destination */
- union vxlan_addr saddr; /* source address */
- __be16 dst_port;
- __u16 port_min; /* source port range */
- __u16 port_max;
- __u8 tos; /* TOS override */
- __u8 ttl;
- u32 flags; /* VXLAN_F_* in vxlan.h */
-
- unsigned long age_interval;
- struct timer_list age_timer;
- spinlock_t hash_lock;
- unsigned int addrcnt;
- unsigned int addrmax;
-
- struct hlist_head fdb_head[FDB_HASH_SIZE];
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
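+
+/* A socket runs in collect-metadata (flow-based) mode when it was
+ * created with VXLAN_F_COLLECT_METADATA or when metadata collection has
+ * been enabled system-wide via ip_tunnel_collect_metadata().
+ */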
+static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
+{
+ return vs->flags & VXLAN_F_COLLECT_METADATA ||
+ ip_tunnel_collect_metadata();
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
@@ -345,7 +312,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
goto nla_put_failure;
- if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
+ if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
nla_put_be16(skb, NDA_PORT, rdst->remote_port))
goto nla_put_failure;
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
@@ -749,7 +716,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (!(flags & NLM_F_CREATE))
return -ENOENT;
- if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+ if (vxlan->cfg.addrmax &&
+ vxlan->addrcnt >= vxlan->cfg.addrmax)
return -ENOSPC;
/* Disallow replace to add a multicast entry */
@@ -835,7 +803,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
return -EINVAL;
*port = nla_get_be16(tb[NDA_PORT]);
} else {
- *port = vxlan->dst_port;
+ *port = vxlan->cfg.dst_port;
}
if (tb[NDA_VNI]) {
@@ -963,10 +931,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
- if (idx < cb->args[0])
- goto skip;
-
list_for_each_entry_rcu(rd, &f->remotes, list) {
+ if (idx < cb->args[0])
+ goto skip;
+
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
@@ -974,9 +942,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
NLM_F_MULTI, rd);
if (err < 0)
goto out;
- }
skip:
- ++idx;
+ ++idx;
+ }
}
}
out:
@@ -1021,7 +989,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan_fdb_create(vxlan, src_mac, src_ip,
NUD_REACHABLE,
NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port,
+ vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
0, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
@@ -1062,7 +1030,7 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
return false;
}
-void vxlan_sock_release(struct vxlan_sock *vs)
+static void vxlan_sock_release(struct vxlan_sock *vs)
{
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
@@ -1078,7 +1046,6 @@ void vxlan_sock_release(struct vxlan_sock *vs)
queue_work(vxlan_wq, &vs->del_work);
}
-EXPORT_SYMBOL_GPL(vxlan_sock_release);
/* Update multicast group membership when first VNI on
* multicast address is brought up
@@ -1161,13 +1128,112 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
return vh;
}
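+
+/* Common receive path for classic (VNI-keyed) and flow-based sockets;
+ * flow-based sockets fold every packet onto VNI 0 and pass the outer
+ * header state up via the attached metadata_dst.
+ */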
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+ struct vxlan_metadata *md, u32 vni,
+ struct metadata_dst *tun_dst)
+{
+ struct iphdr *oip = NULL;
+ struct ipv6hdr *oip6 = NULL;
+ struct vxlan_dev *vxlan;
+ struct pcpu_sw_netstats *stats;
+ union vxlan_addr saddr;
+ int err = 0;
+ union vxlan_addr *remote_ip;
+
+ /* For flow based devices, map all packets to VNI 0 */
+ if (vs->flags & VXLAN_F_COLLECT_METADATA)
+ vni = 0;
+
+ /* Is this VNI defined? */
+ vxlan = vxlan_vs_find_vni(vs, vni);
+ if (!vxlan)
+ goto drop;
+
+ remote_ip = &vxlan->default_dst.remote_ip;
+ skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
+ skb->protocol = eth_type_trans(skb, vxlan->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+ goto drop;
+
+ /* Re-examine inner Ethernet packet */
+ if (remote_ip->sa.sa_family == AF_INET) {
+ oip = ip_hdr(skb);
+ saddr.sin.sin_addr.s_addr = oip->saddr;
+ saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ oip6 = ipv6_hdr(skb);
+ saddr.sin6.sin6_addr = oip6->saddr;
+ saddr.sa.sa_family = AF_INET6;
+#endif
+ }
+
+ if (tun_dst) {
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ tun_dst = NULL;
+ }
+
+ if ((vxlan->flags & VXLAN_F_LEARN) &&
+ vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+ goto drop;
+
+ skb_reset_network_header(skb);
+ /* In flow-based mode, GBP is carried in dst_metadata */
+ if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
+ skb->mark = md->gbp;
+
+ if (oip6)
+ err = IP6_ECN_decapsulate(oip6, skb);
+ if (oip)
+ err = IP_ECN_decapsulate(oip, skb);
+
+ if (unlikely(err)) {
+ if (log_ecn_error) {
+ if (oip6)
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &oip6->saddr);
+ if (oip)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &oip->saddr, oip->tos);
+ }
+ if (err > 1) {
+ ++vxlan->dev->stats.rx_frame_errors;
+ ++vxlan->dev->stats.rx_errors;
+ goto drop;
+ }
+ }
+
+ stats = this_cpu_ptr(vxlan->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+
+ return;
+drop:
+ if (tun_dst)
+ dst_release((struct dst_entry *)tun_dst);
+
+ /* Consume bad packet */
+ kfree_skb(skb);
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
+ struct metadata_dst *tun_dst = NULL;
+ struct ip_tunnel_info *info;
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
u32 flags, vni;
- struct vxlan_metadata md = {0};
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1202,6 +1268,32 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
vni &= VXLAN_VNI_MASK;
}
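+
+ /* In collect-metadata mode, capture the outer IPv4/UDP header in a
+ * metadata_dst so consumers (e.g. OVS) can key on it; otherwise use
+ * a zeroed on-stack vxlan_metadata.
+ */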
+ if (vxlan_collect_metadata(vs)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
+ if (!tun_dst)
+ goto drop;
+
+ info = &tun_dst->u.tun_info;
+ info->key.ipv4_src = iph->saddr;
+ info->key.ipv4_dst = iph->daddr;
+ info->key.ipv4_tos = iph->tos;
+ info->key.ipv4_ttl = iph->ttl;
+ info->key.tp_src = udp_hdr(skb)->source;
+ info->key.tp_dst = udp_hdr(skb)->dest;
+
+ info->mode = IP_TUNNEL_INFO_RX;
+ info->key.tun_flags = TUNNEL_KEY;
+ info->key.tun_id = cpu_to_be64(vni >> 8);
+ if (udp_hdr(skb)->check != 0)
+ info->key.tun_flags |= TUNNEL_CSUM;
+
+ md = ip_tunnel_info_opts(info, sizeof(*md));
+ } else {
+ memset(md, 0, sizeof(*md));
+ }
+
/* For backwards compatibility, only allow reserved fields to be
* used by VXLAN extensions if explicitly requested.
*/
@@ -1209,13 +1301,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct vxlanhdr_gbp *gbp;
gbp = (struct vxlanhdr_gbp *)vxh;
- md.gbp = ntohs(gbp->policy_id);
+ md->gbp = ntohs(gbp->policy_id);
+
+ if (tun_dst)
+ info->key.tun_flags |= TUNNEL_VXLAN_OPT;
if (gbp->dont_learn)
- md.gbp |= VXLAN_GBP_DONT_LEARN;
+ md->gbp |= VXLAN_GBP_DONT_LEARN;
if (gbp->policy_applied)
- md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+ md->gbp |= VXLAN_GBP_POLICY_APPLIED;
flags &= ~VXLAN_GBP_USED_BITS;
}
@@ -1233,8 +1328,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto bad_flags;
}
- md.vni = vxh->vx_vni;
- vs->rcv(vs, skb, &md);
+ vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
return 0;
drop:
@@ -1247,93 +1341,13 @@ bad_flags:
ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
error:
+ if (tun_dst)
+ dst_release((struct dst_entry *)tun_dst);
+
/* Return non vxlan pkt */
return 1;
}
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
- struct vxlan_metadata *md)
-{
- struct iphdr *oip = NULL;
- struct ipv6hdr *oip6 = NULL;
- struct vxlan_dev *vxlan;
- struct pcpu_sw_netstats *stats;
- union vxlan_addr saddr;
- __u32 vni;
- int err = 0;
- union vxlan_addr *remote_ip;
-
- vni = ntohl(md->vni) >> 8;
- /* Is this VNI defined? */
- vxlan = vxlan_vs_find_vni(vs, vni);
- if (!vxlan)
- goto drop;
-
- remote_ip = &vxlan->default_dst.remote_ip;
- skb_reset_mac_header(skb);
- skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
- skb->protocol = eth_type_trans(skb, vxlan->dev);
- skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
- /* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
- goto drop;
-
- /* Re-examine inner Ethernet packet */
- if (remote_ip->sa.sa_family == AF_INET) {
- oip = ip_hdr(skb);
- saddr.sin.sin_addr.s_addr = oip->saddr;
- saddr.sa.sa_family = AF_INET;
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- oip6 = ipv6_hdr(skb);
- saddr.sin6.sin6_addr = oip6->saddr;
- saddr.sa.sa_family = AF_INET6;
-#endif
- }
-
- if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
- goto drop;
-
- skb_reset_network_header(skb);
- skb->mark = md->gbp;
-
- if (oip6)
- err = IP6_ECN_decapsulate(oip6, skb);
- if (oip)
- err = IP_ECN_decapsulate(oip, skb);
-
- if (unlikely(err)) {
- if (log_ecn_error) {
- if (oip6)
- net_info_ratelimited("non-ECT from %pI6\n",
- &oip6->saddr);
- if (oip)
- net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
- &oip->saddr, oip->tos);
- }
- if (err > 1) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
- goto drop;
- }
- }
-
- stats = this_cpu_ptr(vxlan->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
- netif_rx(skb);
-
- return;
-drop:
- /* Consume bad packet */
- kfree_skb(skb);
-}
-
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -1672,7 +1686,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port,
+ __be16 src_port, __be16 dst_port, __be32 vni,
struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
@@ -1722,7 +1736,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = md->vni;
+ vxh->vx_vni = vni;
if (type & SKB_GSO_TUNNEL_REMCSUM) {
u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1755,10 +1769,10 @@ err:
}
#endif
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port,
- struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni,
+ struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
int min_headroom;
@@ -1801,7 +1815,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = md->vni;
+ vxh->vx_vni = vni;
if (type & SKB_GSO_TUNNEL_REMCSUM) {
u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1828,7 +1842,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
ttl, df, src_port, dst_port, xnet,
!(vxflags & VXLAN_F_UDP_CSUM));
}
-EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
@@ -1878,22 +1891,43 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
+ struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev);
struct sock *sk = vxlan->vn_sock->sock->sk;
struct rtable *rt = NULL;
const struct iphdr *old_iph;
struct flowi4 fl4;
union vxlan_addr *dst;
- struct vxlan_metadata md;
+ union vxlan_addr remote_ip;
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
u32 vni;
__be16 df = 0;
__u8 tos, ttl;
int err;
+ u32 flags = vxlan->flags;
+
+ /* FIXME: Support IPv6 */
+ info = skb_tunnel_info(skb, AF_INET);
+
+ if (rdst) {
+ dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+ vni = rdst->remote_vni;
+ dst = &rdst->remote_ip;
+ } else {
+ if (!info) {
+ WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
+ dev->name);
+ goto drop;
+ }
- dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
- vni = rdst->remote_vni;
- dst = &rdst->remote_ip;
+ dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+ vni = be64_to_cpu(info->key.tun_id);
+ remote_ip.sin.sin_family = AF_INET;
+ remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+ dst = &remote_ip;
+ }
if (vxlan_addr_any(dst)) {
if (did_rsc) {
@@ -1906,25 +1940,42 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
old_iph = ip_hdr(skb);
- ttl = vxlan->ttl;
+ ttl = vxlan->cfg.ttl;
if (!ttl && vxlan_addr_multicast(dst))
ttl = 1;
- tos = vxlan->tos;
+ tos = vxlan->cfg.tos;
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
- src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
- vxlan->port_max, true);
+ src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+ vxlan->cfg.port_max, true);
if (dst->sa.sa_family == AF_INET) {
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags |= VXLAN_F_UDP_CSUM;
+ else
+ flags &= ~VXLAN_F_UDP_CSUM;
+
+ ttl = info->key.ipv4_ttl;
+ tos = info->key.ipv4_tos;
+
+ if (info->options_len)
+ md = ip_tunnel_info_opts(info, sizeof(*md));
+ } else {
+ md->gbp = skb->mark;
+ }
+
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = rdst->remote_ifindex;
+ fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
fl4.flowi4_tos = RT_TOS(tos);
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = dst->sin.sin_addr.s_addr;
- fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
+ fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
rt = ip_route_output_key(vxlan->net, &fl4);
if (IS_ERR(rt)) {
@@ -1958,14 +2009,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- md.vni = htonl(vni << 8);
- md.gbp = skb->mark;
-
err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
- src_port, dst_port, &md,
+ src_port, dst_port, htonl(vni << 8), md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
- vxlan->flags);
+ flags);
if (err < 0) {
/* skb is already freed. */
skb = NULL;
@@ -1980,13 +2028,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
u32 flags;
memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = rdst->remote_ifindex;
+ fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
fl6.daddr = dst->sin6.sin6_addr;
- fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+ fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
- if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+ if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
dev->stats.tx_carrier_errors++;
@@ -2018,11 +2066,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- md.vni = htonl(vni << 8);
- md.gbp = skb->mark;
+ md->gbp = skb->mark;
err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
- 0, ttl, src_port, dst_port, &md,
+ 0, ttl, src_port, dst_port, htonl(vni << 8), md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
vxlan->flags);
#endif
@@ -2051,11 +2098,15 @@ tx_free:
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ const struct ip_tunnel_info *info;
struct ethhdr *eth;
bool did_rsc = false;
struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
+ /* FIXME: Support IPv6 */
+ info = skb_tunnel_info(skb, AF_INET);
+
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
@@ -2078,6 +2129,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
}
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
+ info && info->mode == IP_TUNNEL_INFO_TX) {
+ vxlan_xmit_one(skb, dev, NULL, false);
+ return NETDEV_TX_OK;
+ }
+
f = vxlan_find_mac(vxlan, eth->h_dest);
did_rsc = false;
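When VXLAN_F_COLLECT_METADATA is set and the skb carries IP_TUNNEL_INFO_TX metadata, vxlan_xmit_one() runs with rdst == NULL and takes the destination address, VNI and UDP port from the ip_tunnel_info key rather than from the FDB. A hedged sketch of the per-field fallback, using stand-in types (the real driver reads info->key.tun_id, ipv4_dst and tp_dst as in the hunk above):

#include <stdint.h>

/* Hypothetical stand-in for the tunnel key fields this patch consumes. */
struct tun_key_sketch {
	uint64_t tun_id;	/* VNI selected by the flow */
	uint32_t ipv4_dst;	/* remote tunnel endpoint */
	uint16_t tp_dst;	/* UDP dst port; 0 means "use device default" */
};

static uint16_t pick_dst_port(const struct tun_key_sketch *key,
			      uint16_t dev_default)
{
	/* mirrors "info->key.tp_dst ? : vxlan->cfg.dst_port" above */
	return key->tp_dst ? key->tp_dst : dev_default;
}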
@@ -2143,7 +2200,7 @@ static void vxlan_cleanup(unsigned long arg)
if (f->state & NUD_PERMANENT)
continue;
- timeout = f->used + vxlan->age_interval * HZ;
+ timeout = f->used + vxlan->cfg.age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
"garbage collect %pM\n",
@@ -2207,8 +2264,8 @@ static int vxlan_open(struct net_device *dev)
struct vxlan_sock *vs;
int ret = 0;
- vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
- false, vxlan->flags);
+ vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
+ vxlan->cfg.no_share, vxlan->flags);
if (IS_ERR(vs))
return PTR_ERR(vs);
@@ -2222,7 +2279,7 @@ static int vxlan_open(struct net_device *dev)
}
}
- if (vxlan->age_interval)
+ if (vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
return ret;
@@ -2380,7 +2437,7 @@ static void vxlan_setup(struct net_device *dev)
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
- vxlan->dst_port = htons(vxlan_port);
+ vxlan->cfg.dst_port = htons(vxlan_port);
vxlan->dev = dev;
@@ -2405,6 +2462,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_RSC] = { .type = NLA_U8 },
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
[IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
@@ -2500,7 +2558,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2529,8 +2586,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
vs->sock = sock;
atomic_set(&vs->refcnt, 1);
- vs->rcv = rcv;
- vs->data = data;
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
/* Initialize the vxlan udp offloads structure */
@@ -2554,9 +2609,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
return vs;
}
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share, u32 flags)
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ bool no_share, u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
@@ -2566,7 +2620,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
flags);
- if (vs && vs->rcv == rcv) {
+ if (vs) {
if (!atomic_add_unless(&vs->refcnt, 1, 0))
vs = ERR_PTR(-EBUSY);
spin_unlock(&vn->sock_lock);
@@ -2575,58 +2629,38 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
spin_unlock(&vn->sock_lock);
}
- return vxlan_socket_create(net, port, rcv, data, flags);
+ return vxlan_socket_create(net, port, flags);
}
-EXPORT_SYMBOL_GPL(vxlan_sock_add);
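vxlan_sock_add() keeps the share-or-create pattern: look up an existing socket by family, port and receive flags, take a reference only if the refcount has not already dropped to zero, and otherwise create a fresh socket. A self-contained sketch of the atomic_add_unless(&vs->refcnt, 1, 0) idiom, written with C11 atomics for illustration:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only while the object is still live (refcnt != 0);
 * never revive an object that a concurrent release has zeroed. */
static bool try_get_ref(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	}
	return false;
}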
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ struct vxlan_config *conf)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
- __u32 vni;
int err;
bool use_ipv6 = false;
-
- if (!data[IFLA_VXLAN_ID])
- return -EINVAL;
+ __be16 default_port = vxlan->cfg.dst_port;
vxlan->net = src_net;
- vni = nla_get_u32(data[IFLA_VXLAN_ID]);
- dst->remote_vni = vni;
-
- /* Unless IPv6 is explicitly requested, assume IPv4 */
- dst->remote_ip.sa.sa_family = AF_INET;
- if (data[IFLA_VXLAN_GROUP]) {
- dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
- } else if (data[IFLA_VXLAN_GROUP6]) {
- if (!IS_ENABLED(CONFIG_IPV6))
- return -EPFNOSUPPORT;
+ dst->remote_vni = conf->vni;
- dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
- dst->remote_ip.sa.sa_family = AF_INET6;
- use_ipv6 = true;
- }
+ memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
- if (data[IFLA_VXLAN_LOCAL]) {
- vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
- vxlan->saddr.sa.sa_family = AF_INET;
- } else if (data[IFLA_VXLAN_LOCAL6]) {
- if (!IS_ENABLED(CONFIG_IPV6))
- return -EPFNOSUPPORT;
+ /* Unless IPv6 is explicitly requested, assume IPv4 */
+ if (!dst->remote_ip.sa.sa_family)
+ dst->remote_ip.sa.sa_family = AF_INET;
- /* TODO: respect scope id */
- vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
- vxlan->saddr.sa.sa_family = AF_INET6;
+ if (dst->remote_ip.sa.sa_family == AF_INET6 ||
+ vxlan->cfg.saddr.sa.sa_family == AF_INET6)
use_ipv6 = true;
- }
- if (data[IFLA_VXLAN_LINK] &&
- (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
+ if (conf->remote_ifindex) {
struct net_device *lowerdev
- = __dev_get_by_index(src_net, dst->remote_ifindex);
+ = __dev_get_by_index(src_net, conf->remote_ifindex);
+
+ dst->remote_ifindex = conf->remote_ifindex;
if (!lowerdev) {
pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2644,7 +2678,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
}
#endif
- if (!tb[IFLA_MTU])
+ if (!conf->mtu)
dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
dev->needed_headroom = lowerdev->hard_header_len +
@@ -2652,101 +2686,188 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
} else if (use_ipv6)
vxlan->flags |= VXLAN_F_IPV6;
+ memcpy(&vxlan->cfg, conf, sizeof(*conf));
+ if (!vxlan->cfg.dst_port)
+ vxlan->cfg.dst_port = default_port;
+ vxlan->flags |= conf->flags;
+
+ if (!vxlan->cfg.age_interval)
+ vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
+
+ if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
+ vxlan->cfg.dst_port, vxlan->flags))
+ return -EEXIST;
+
+ dev->ethtool_ops = &vxlan_ethtool_ops;
+
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE|NUD_PERMANENT,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ vxlan_fdb_delete_default(vxlan);
+ return err;
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
+
+ return 0;
+}
+
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type, struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX+1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
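vxlan_dev_create() gives other kernel code a way to instantiate a VXLAN device from a filled-in vxlan_config without a netlink round trip. A hypothetical caller, shown only as a sketch (the device name, VNI and flag choice are illustrative; the returned pointer must be checked with IS_ERR()):

/* Hypothetical in-kernel caller of the new API. */
static struct net_device *demo_create_vxlan(struct net *net)
{
	struct vxlan_config conf;

	memset(&conf, 0, sizeof(conf));
	conf.vni = 42;				/* example VNI */
	conf.dst_port = htons(4789);		/* IANA VXLAN port */
	conf.flags = VXLAN_F_COLLECT_METADATA;	/* flow-based mode */

	return vxlan_dev_create(net, "vxlan-demo%d", NET_NAME_ENUM, &conf);
}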
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct vxlan_config conf;
+ int err;
+
+ if (!data[IFLA_VXLAN_ID])
+ return -EINVAL;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+ if (data[IFLA_VXLAN_GROUP]) {
+ conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+ } else if (data[IFLA_VXLAN_GROUP6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
+ conf.remote_ip.sa.sa_family = AF_INET6;
+ }
+
+ if (data[IFLA_VXLAN_LOCAL]) {
+ conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
+ conf.saddr.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_LOCAL6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ /* TODO: respect scope id */
+ conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
+ conf.saddr.sa.sa_family = AF_INET6;
+ }
+
+ if (data[IFLA_VXLAN_LINK])
+ conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
if (data[IFLA_VXLAN_TOS])
- vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
+ conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
if (data[IFLA_VXLAN_TTL])
- vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+ conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
- vxlan->flags |= VXLAN_F_LEARN;
+ conf.flags |= VXLAN_F_LEARN;
if (data[IFLA_VXLAN_AGEING])
- vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
- else
- vxlan->age_interval = FDB_AGE_DEFAULT;
+ conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
- vxlan->flags |= VXLAN_F_PROXY;
+ conf.flags |= VXLAN_F_PROXY;
if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
- vxlan->flags |= VXLAN_F_RSC;
+ conf.flags |= VXLAN_F_RSC;
if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
- vxlan->flags |= VXLAN_F_L2MISS;
+ conf.flags |= VXLAN_F_L2MISS;
if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
- vxlan->flags |= VXLAN_F_L3MISS;
+ conf.flags |= VXLAN_F_L3MISS;
if (data[IFLA_VXLAN_LIMIT])
- vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+ conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+ if (data[IFLA_VXLAN_COLLECT_METADATA] &&
+ nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
+ conf.flags |= VXLAN_F_COLLECT_METADATA;
if (data[IFLA_VXLAN_PORT_RANGE]) {
const struct ifla_vxlan_port_range *p
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
- vxlan->port_min = ntohs(p->low);
- vxlan->port_max = ntohs(p->high);
+ conf.port_min = ntohs(p->low);
+ conf.port_max = ntohs(p->high);
}
if (data[IFLA_VXLAN_PORT])
- vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
- vxlan->flags |= VXLAN_F_UDP_CSUM;
+ conf.flags |= VXLAN_F_UDP_CSUM;
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
- vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+ conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
- vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+ conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
if (data[IFLA_VXLAN_REMCSUM_TX] &&
nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
- vxlan->flags |= VXLAN_F_REMCSUM_TX;
+ conf.flags |= VXLAN_F_REMCSUM_TX;
if (data[IFLA_VXLAN_REMCSUM_RX] &&
nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
- vxlan->flags |= VXLAN_F_REMCSUM_RX;
+ conf.flags |= VXLAN_F_REMCSUM_RX;
if (data[IFLA_VXLAN_GBP])
- vxlan->flags |= VXLAN_F_GBP;
+ conf.flags |= VXLAN_F_GBP;
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
- vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
-
- if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
- vxlan->dst_port, vxlan->flags)) {
- pr_info("duplicate VNI %u\n", vni);
- return -EEXIST;
- }
+ conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
- dev->ethtool_ops = &vxlan_ethtool_ops;
+ err = vxlan_dev_configure(src_net, dev, &conf);
+ switch (err) {
+ case -ENODEV:
+ pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
+ break;
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE|NUD_PERMANENT,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
- NTF_SELF);
- if (err)
- return err;
- }
+ case -EPERM:
+ pr_info("IPv6 is disabled via sysctl\n");
+ break;
- err = register_netdevice(dev);
- if (err) {
- vxlan_fdb_delete_default(vxlan);
- return err;
+ case -EEXIST:
+ pr_info("duplicate VNI %u\n", conf.vni);
+ break;
}
- list_add(&vxlan->next, &vn->vxlan_list);
-
- return 0;
+ return err;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -2777,6 +2898,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -2794,8 +2916,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
const struct vxlan_dev *vxlan = netdev_priv(dev);
const struct vxlan_rdst *dst = &vxlan->default_dst;
struct ifla_vxlan_port_range ports = {
- .low = htons(vxlan->port_min),
- .high = htons(vxlan->port_max),
+ .low = htons(vxlan->cfg.port_min),
+ .high = htons(vxlan->cfg.port_max),
};
if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
@@ -2818,22 +2940,22 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
goto nla_put_failure;
- if (!vxlan_addr_any(&vxlan->saddr)) {
- if (vxlan->saddr.sa.sa_family == AF_INET) {
+ if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
+ if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
- vxlan->saddr.sin.sin_addr.s_addr))
+ vxlan->cfg.saddr.sin.sin_addr.s_addr))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
- &vxlan->saddr.sin6.sin6_addr))
+ &vxlan->cfg.saddr.sin6.sin6_addr))
goto nla_put_failure;
#endif
}
}
- if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
- nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+ if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+ nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
@@ -2843,9 +2965,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
!!(vxlan->flags & VXLAN_F_L3MISS)) ||
- nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
- nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
- nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+ nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
+ !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
+ nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
+ nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
+ nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
!!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9729e6941635..c04fb00e7930 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -11,7 +11,8 @@ ath10k_core-y += mac.o \
wmi-tlv.o \
bmi.o \
hw.o \
- p2p.o
+ p2p.o \
+ swap.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 31a990635490..df7c7616533b 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -178,7 +178,7 @@ struct bmi_target_info {
};
/* in msec */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e508c65b6ba8..cf28fbebaedc 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -452,6 +452,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
+ struct ath10k *ar = ce_state->ar;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 0eddb204d85b..5c903e15dd65 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -21,7 +21,7 @@
#include "hif.h"
/* Maximum number of Copy Engines supported */
-#define CE_COUNT_MAX 8
+#define CE_COUNT_MAX 12
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
@@ -38,8 +38,13 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
-#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB 2
+
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb
struct ce_desc {
__le32 addr;
@@ -423,8 +428,10 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+ ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+ ar->regs->ce_wrap_intr_sum_host_msi_mask
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
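These macros now expand to fields of ar->regs, so any function that uses them must have a struct ath10k *ar in scope; that is exactly why the ce.c hunk above adds a local ar = ce_state->ar. A minimal sketch of the extraction the macros implement, with the qca988x values from hw.c as an example:

#include <stdint.h>

struct regs_sketch {			/* stand-in for ath10k_hw_regs */
	uint32_t ce_wrap_intr_sum_host_msi_lsb;		/* 8 on qca988x */
	uint32_t ce_wrap_intr_sum_host_msi_mask;	/* 0x0000ff00 */
};

static uint32_t msi_summary_get(const struct regs_sketch *regs, uint32_t raw)
{
	return (raw & regs->ce_wrap_intr_sum_host_msi_mask) >>
	       regs->ce_wrap_intr_sum_host_msi_lsb;
}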
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 59496a90ad5e..f79fa6c67ebc 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -49,6 +49,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
.has_shifted_cc_wraparound = true,
+ .otp_exe_param = 0,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
.fw = QCA988X_HW_2_0_FW_FILE,
@@ -63,6 +64,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .otp_exe_param = 0,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
.fw = QCA6174_HW_2_1_FW_FILE,
@@ -77,6 +79,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .otp_exe_param = 0,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
.fw = QCA6174_HW_3_0_FW_FILE,
@@ -91,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .otp_exe_param = 0,
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
@@ -101,8 +105,68 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
},
+ {
+ .id = QCA99X0_HW_2_0_DEV_VERSION,
+ .name = "qca99x0 hw2.0",
+ .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .fw = {
+ .dir = QCA99X0_HW_2_0_FW_DIR,
+ .fw = QCA99X0_HW_2_0_FW_FILE,
+ .otp = QCA99X0_HW_2_0_OTP_FILE,
+ .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ },
};
+static const char *const ath10k_core_fw_feature_str[] = {
+ [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+ [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+ [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+ [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+ [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+ [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+ [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+ [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+ [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+ [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+};
+
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+ size_t buf_len,
+ enum ath10k_fw_features feat)
+{
+ if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+ WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+ return scnprintf(buf, buf_len, "bit%d", feat);
+ }
+
+ return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t buf_len)
+{
+ unsigned int len = 0;
+ int i;
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ if (test_bit(i, ar->fw_features)) {
+ if (len > 0)
+ len += scnprintf(buf + len, buf_len - len, ",");
+
+ len += ath10k_core_get_fw_feature_str(buf + len,
+ buf_len - len,
+ i);
+ }
+ }
+}
+
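The helper joins the set firmware-feature bits into one comma-separated string; unknown bits are rendered as "bitN". A self-contained sketch of the same join, using snprintf where the kernel uses scnprintf (scnprintf returns the bytes actually written, which is what makes the in-kernel accumulation safe against truncation):

#include <stdio.h>

/* Sketch: comma-join a list of names into buf, snprintf variant. */
static void join_names(char *buf, size_t buf_len,
		       const char *const names[], size_t n)
{
	size_t len = 0, i;

	buf[0] = '\0';
	for (i = 0; i < n && len < buf_len; i++) {
		if (len > 0)
			len += snprintf(buf + len, buf_len - len, ",");
		len += snprintf(buf + len, buf_len - len, "%s", names[i]);
	}
}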
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
@@ -355,6 +419,7 @@ out:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
+ u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
int ret;
ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
@@ -380,7 +445,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return ret;
}
- ret = ath10k_bmi_execute(ar, address, 0, &result);
+ ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp (%d)\n", ret);
return ret;
@@ -412,6 +477,13 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
data = ar->firmware_data;
data_len = ar->firmware_len;
mode_name = "normal";
+ ret = ath10k_swap_code_seg_configure(ar,
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+ if (ret) {
+ ath10k_err(ar, "failed to configure fw code swap: %d\n",
+ ret);
+ return ret;
+ }
break;
case ATH10K_FIRMWARE_MODE_UTF:
data = ar->testmode.utf->data;
@@ -451,6 +523,8 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
+ ath10k_swap_code_seg_release(ar);
+
ar->board = NULL;
ar->board_data = NULL;
ar->board_len = 0;
@@ -464,6 +538,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
ar->firmware_len = 0;
ar->cal_file = NULL;
+
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -737,6 +812,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
ar->htt.op_version);
break;
+ case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw code swap image ie (%zd B)\n",
+ ie_len);
+ ar->swap.firmware_codeswap_data = data;
+ ar->swap.firmware_codeswap_len = ie_len;
+ break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
@@ -1014,6 +1096,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -1023,6 +1106,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
@@ -1033,6 +1117,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+ ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+ ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+ ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+ ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1056,6 +1151,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);
@@ -1330,6 +1426,13 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
+ ret = ath10k_swap_code_seg_init(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+
mutex_lock(&ar->conf_mutex);
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1470,9 +1573,15 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
switch (hw_rev) {
case ATH10K_HW_QCA988X:
ar->regs = &qca988x_regs;
+ ar->hw_values = &qca988x_values;
break;
case ATH10K_HW_QCA6174:
ar->regs = &qca6174_regs;
+ ar->hw_values = &qca6174_values;
+ break;
+ case ATH10K_HW_QCA99X0:
+ ar->regs = &qca99x0_regs;
+ ar->hw_values = &qca99x0_values;
break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 78094f23c9dd..78e07051b897 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -36,6 +36,7 @@
#include "spectral.h"
#include "thermal.h"
#include "wow.h"
+#include "swap.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -327,8 +328,8 @@ struct ath10k_vif {
u32 uapsd;
} sta;
struct {
- /* 127 stations; wmi limit */
- u8 tim_bitmap[16];
+ /* 512 stations */
+ u8 tim_bitmap[64];
u8 tim_len;
u32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -545,6 +546,7 @@ struct ath10k {
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
+ u32 max_spatial_stream;
/* protected by conf_mutex */
bool ani_enabled;
@@ -560,6 +562,7 @@ struct ath10k {
struct completion target_suspend;
const struct ath10k_hw_regs *regs;
+ const struct ath10k_hw_values *hw_values;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
@@ -570,6 +573,7 @@ struct ath10k {
const char *name;
u32 patch_load_addr;
int uart_pin;
+ u32 otp_exe_param;
/* This is true if given HW chip has a quirky Cycle Counter
* wraparound which resets to 0x7fffffff instead of 0. All
@@ -578,6 +582,12 @@ struct ath10k {
*/
bool has_shifted_cc_wraparound;
+ /* Some chips expect the fragment descriptor to be contiguous
+ * memory for any TX operation. Set the continuous_frag_desc flag
+ * for hardware with this requirement.
+ */
+ bool continuous_frag_desc;
+
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
@@ -602,6 +612,12 @@ struct ath10k {
const struct firmware *cal_file;
+ struct {
+ const void *firmware_codeswap_data;
+ size_t firmware_codeswap_len;
+ struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+ } swap;
+
char spec_board_id[100];
bool spec_board_loaded;
@@ -617,6 +633,7 @@ struct ath10k {
bool is_roc;
int vdev_id;
int roc_freq;
+ bool roc_notify;
} scan;
struct {
@@ -675,6 +692,8 @@ struct ath10k {
int max_num_stations;
int max_num_vdevs;
int max_num_tdls_vdevs;
+ int num_active_peers;
+ int num_tids;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -749,6 +768,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_hw_rev hw_rev,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t max_len);
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 8fa606a9c4dd..edf6047997a7 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -124,7 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+ char fw_features[128];
+
+ ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+ ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
@@ -137,8 +141,10 @@ void ath10k_print_driver_info(struct ath10k *ar)
ar->htt.target_version_major,
ar->htt.target_version_minor,
ar->wmi.op_version,
+ ar->htt.op_version,
ath10k_cal_mode_str(ar->cal_mode),
- ar->max_num_stations);
+ ar->max_num_stations,
+ fw_features);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 6da6ef26143a..4474c3e839db 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -102,6 +102,43 @@ static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
};
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+ [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+ [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
+ HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+};
+
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
@@ -147,6 +184,10 @@ int ath10k_htt_init(struct ath10k *ar)
2; /* ip4 dscp or ip6 priority */
switch (ar->htt.op_version) {
+ case ATH10K_FW_HTT_OP_VERSION_10_4:
+ ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+ break;
case ATH10K_FW_HTT_OP_VERSION_10_1:
ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
@@ -208,5 +249,9 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
if (status)
return status;
+ status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+ if (status)
+ return status;
+
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}
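Each firmware branch numbers its HTT target-to-host messages differently, so ath10k_htt_init() installs a per-version translation table and the raw wire value indexes it to yield the common enum before dispatch. A sketch of that lookup, with the bounds check the driver keeps in t2h_msg_types_max:

/* Sketch: raw wire msg type -> common enum, guarded by table size. */
static int translate_t2h(const int *table, unsigned int table_max,
			 unsigned int raw)
{
	if (raw >= table_max)
		return -1;	/* unknown value: caller drops the message */
	return table[raw];
}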
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 7e8a0d835663..8bdf1e7dd171 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -87,6 +87,11 @@ struct htt_data_tx_desc_frag {
__le32 len;
} __packed;
+struct htt_msdu_ext_desc {
+ __le32 tso_flag[4];
+ struct htt_data_tx_desc_frag frags[6];
+};
+
enum htt_data_tx_desc_flags0 {
HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
@@ -349,6 +354,38 @@ enum htt_tlv_t2h_msg_type {
HTT_TLV_T2H_NUM_MSGS
};
+enum htt_10_4_t2h_msg_type {
+ HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10,
+ HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11,
+ HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+ HTT_10_4_T2H_MSG_TYPE_TEST = 0x13,
+ HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
+ HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17,
+ HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
+ /* 0x19 to 0x2f are reserved */
+ HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30,
+ /* keep this last */
+ HTT_10_4_T2H_NUM_MSGS
+};
+
enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF,
HTT_T2H_MSG_TYPE_RX_IND,
@@ -375,6 +412,10 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_AGGR_CONF,
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST,
+ HTT_T2H_MSG_TYPE_EN_STATS,
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+ HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -1430,6 +1471,11 @@ struct ath10k_htt {
/* rx_status template */
struct ieee80211_rx_status rx_status;
+
+ struct {
+ dma_addr_t paddr;
+ struct htt_msdu_ext_desc *vaddr;
+ } frag_desc;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1497,6 +1543,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 89eb16b30fc4..d7d118328f31 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1201,7 +1201,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
{
struct htt_rx_desc *rxd;
enum rx_msdu_decap_format decap;
- struct ieee80211_hdr *hdr;
/* First msdu's decapped header:
* [802.11 header] <-- padded to 4 bytes long
@@ -1215,7 +1214,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
*/
rxd = (void *)msdu->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
@@ -2074,6 +2072,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
+ case HTT_T2H_MSG_TYPE_EN_STATS:
+ case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+ case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
+ case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a60ef7d1d5fc..148d5b607c3c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -84,6 +84,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ int ret, size;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
@@ -94,11 +95,31 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
- idr_destroy(&htt->pending_tx);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_idr_pending_tx;
+ }
+
+ if (!ar->hw_params.continuous_frag_desc)
+ goto skip_frag_desc_alloc;
+
+ size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_DMA);
+ if (!htt->frag_desc.vaddr) {
+ ath10k_warn(ar, "failed to alloc fragment desc memory\n");
+ ret = -ENOMEM;
+ goto free_tx_pool;
}
+skip_frag_desc_alloc:
return 0;
+
+free_tx_pool:
+ dma_pool_destroy(htt->tx_pool);
+free_idr_pending_tx:
+ idr_destroy(&htt->pending_tx);
+ return ret;
}
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
@@ -121,9 +142,18 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
+ int size;
+
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
dma_pool_destroy(htt->tx_pool);
+
+ if (htt->frag_desc.vaddr) {
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+ dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
+ htt->frag_desc.paddr);
+ }
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -201,6 +231,48 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
return 0;
}
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int ret, size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+ cmd->frag_desc_bank_cfg.info = 0;
+ cmd->frag_desc_bank_cfg.num_banks = 1;
+ cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
+ cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
+ __cpu_to_le32(htt->frag_desc.paddr);
+ cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
+ __cpu_to_le16(htt->max_num_pending_tx - 1);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
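The fragment-descriptor pool is one htt_msdu_ext_desc per possible in-flight MSDU, and ath10k_htt_tx_alloc()/ath10k_htt_tx_free() must compute the same byte count. A sketch with a stand-in descriptor (the real layout is the htt.h hunk above: four TSO-flag dwords plus six {addr, len} fragment entries):

#include <stddef.h>
#include <stdint.h>

struct msdu_ext_desc_sketch {		/* stand-in for htt_msdu_ext_desc */
	uint32_t tso_flag[4];
	struct { uint32_t addr, len; } frags[6];
};

static size_t frag_pool_bytes(size_t max_pending_tx)
{
	/* identical expression on the alloc and free paths */
	return max_pending_tx * sizeof(struct msdu_ext_desc_sketch);
}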
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 5997f00afe3b..fef7ccf6e185 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -34,8 +34,15 @@ const struct ath10k_hw_regs qca988x_regs = {
.ce7_base_address = 0x00059000,
.soc_reset_control_si0_rst_mask = 0x00000001,
.soc_reset_control_ce_rst_mask = 0x00040000,
- .soc_chip_id_address = 0x00ec,
- .scratch_3_address = 0x0030,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00000030,
+ .fw_indicator_address = 0x00009030,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
};
const struct ath10k_hw_regs qca6174_regs = {
@@ -54,8 +61,79 @@ const struct ath10k_hw_regs qca6174_regs = {
.ce7_base_address = 0x00036000,
.soc_reset_control_si0_rst_mask = 0x00000000,
.soc_reset_control_ce_rst_mask = 0x00000001,
- .soc_chip_id_address = 0x000f0,
- .scratch_3_address = 0x0028,
+ .soc_chip_id_address = 0x000000f0,
+ .scratch_3_address = 0x00000028,
+ .fw_indicator_address = 0x0003a028,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+ .rtc_state_cold_reset_mask = 0x00000400,
+ .rtc_soc_base_address = 0x00080000,
+ .rtc_wmac_base_address = 0x00000000,
+ .soc_core_base_address = 0x00082000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+ * addresses of CE0 and CE1, no other copy engine is directly
+ * referenced in the code. It is not really necessary to assign
+ * addresses for the newly supported CEs in this address table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00040050,
+ .fw_indicator_address = 0x00040050,
+ .pcie_local_base_address = 0x00000000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+ .rtc_state_val_on = 5,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
};
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
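The metadata field in the CE descriptor flags moves per chip: qca988x/qca6174 decode with mask 0xFFFC and LSB 2, while qca99x0 uses 0xFFF0 and LSB 4 because bits 2 and 3 now carry the host/target interrupt-disable flags (see the ce.h hunk above). A worked example of the same raw flags decoding differently:

#include <assert.h>
#include <stdint.h>

static uint16_t meta(uint16_t flags, uint16_t mask, uint16_t lsb)
{
	return (flags & mask) >> lsb;
}

int main(void)
{
	uint16_t flags = 0x0154;

	assert(meta(flags, 0xFFFC, 2) == 0x55);	/* qca988x/qca6174 */
	assert(meta(flags, 0xFFF0, 4) == 0x15);	/* qca99x0 */
	return 0;
}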
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 85cca29375fe..917228517546 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -72,6 +72,18 @@ enum qca6174_chip_id_rev {
#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV 0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
+#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE "firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE "otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@@ -112,6 +124,9 @@ enum ath10k_fw_ie_type {
* FW API 5 and above.
*/
ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+ /* Code swap image for firmware binary */
+ ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
};
enum ath10k_fw_wmi_op_version {
@@ -122,6 +137,7 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
ATH10K_FW_WMI_OP_VERSION_TLV = 4,
ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+ ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
/* keep last */
ATH10K_FW_WMI_OP_VERSION_MAX,
@@ -137,6 +153,8 @@ enum ath10k_fw_htt_op_version {
ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+ ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
/* keep last */
ATH10K_FW_HTT_OP_VERSION_MAX,
};
@@ -144,6 +162,7 @@ enum ath10k_fw_htt_op_version {
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
+ ATH10K_HW_QCA99X0,
};
struct ath10k_hw_regs {
@@ -164,16 +183,38 @@ struct ath10k_hw_regs {
u32 soc_reset_control_ce_rst_mask;
u32 soc_chip_id_address;
u32 scratch_3_address;
+ u32 fw_indicator_address;
+ u32 pcie_local_base_address;
+ u32 ce_wrap_intr_sum_host_msi_lsb;
+ u32 ce_wrap_intr_sum_host_msi_mask;
+ u32 pcie_intr_fw_mask;
+ u32 pcie_intr_ce_mask_all;
+ u32 pcie_intr_clr_address;
};
extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+ u32 rtc_state_val_on;
+ u8 ce_count;
+ u8 msi_assign_ce_max;
+ u8 num_target_ce_config_wlan;
+ u16 ce_desc_meta_data_mask;
+ u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
@@ -310,8 +351,73 @@ enum ath10k_hw_rate_cck {
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+/* Diagnostic Window */
+#define CE_DIAG_PIPE 7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS 16
+#define TARGET_10_4_NUM_STATIONS 32
+#define TARGET_10_4_NUM_PEERS ((TARGET_10_4_NUM_STATIONS) + \
+ (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS 0
+
+/* TODO: increase qcache max client limit to 512 after
+ * testing with 512 clients.
+ */
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 256
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
+#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
+#define TARGET_10_4_NUM_PEER_KEYS 2
+#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_AST_SKID_LIMIT 32
+#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \
+ BIT(2) | BIT(3))
+#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \
+ BIT(2) | BIT(3))
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS 4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS 0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10_4_MCAST2UCAST_MODE 0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
+#define TARGET_10_4_NUM_WDS_ENTRIES 32
+#define TARGET_10_4_DMA_BURST_SIZE 1
+#define TARGET_10_4_MAC_AGGR_DELIM 0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG 0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10_4_11AC_TX_MAX_FRAGS 2
+#define TARGET_10_4_MAX_PEER_EXT_STATS 16
+#define TARGET_10_4_SMART_ANT_CAP 0
+#define TARGET_10_4_BK_MIN_FREE 0
+#define TARGET_10_4_BE_MIN_FREE 0
+#define TARGET_10_4_VI_MIN_FREE 0
+#define TARGET_10_4_VO_MIN_FREE 0
+#define TARGET_10_4_RX_BATCH_MODE 1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0
+#define TARGET_10_4_ATF_CONFIG 0
+#define TARGET_10_4_IPHDR_PAD_CONFIG 1
+#define TARGET_10_4_QWRAP_CONFIG 0
+
/* Number of Copy Engines supported */
-#define CE_COUNT 8
+#define CE_COUNT ar->hw_values->ce_count
/*
* Total number of PCIe MSI interrupts requested for all interrupt sources.
@@ -335,10 +441,10 @@ enum ath10k_hw_rate_cck {
/* MSIs for Copy Engines */
#define MSI_ASSIGN_CE_INITIAL 1
-#define MSI_ASSIGN_CE_MAX 7
+#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max
/* as of IP3.7.1 */
-#define RTC_STATE_V_ON 3
+#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on
#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0
@@ -374,7 +480,7 @@ enum ath10k_hw_rate_cck {
#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
-#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
+#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
@@ -448,7 +554,7 @@ enum ath10k_hw_rate_cck {
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
-#define PCIE_INTR_CLR_ADDRESS 0x0014
+#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
@@ -456,16 +562,18 @@ enum ath10k_hw_rate_cck {
#define CCNT_TO_MSEC(x) ((x) / 88000)
/* Firmware indications to the Host via SCRATCH_3 register. */
-#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
/* HOST_REG interrupt from firmware */
-#define PCIE_INTR_FIRMWARE_MASK 0x00000400
-#define PCIE_INTR_CE_MASK_ALL 0x0007f800
+#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all
#define DRAM_BASE_ADDRESS 0x00400000
+#define PCIE_BAR_REG_ADDRESS 0x40030
+
#define MISSING 0
#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
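Worked numbers for the 10.4 sizing above: peers = stations + vdevs = 32 + 16 = 48, and the target reserves two TIDs per peer, giving 96. As compile-time checks, with sketch names mirroring the defines:

#define NUM_VDEVS_SKETCH	16
#define NUM_STATIONS_SKETCH	32
#define NUM_PEERS_SKETCH	(NUM_STATIONS_SKETCH + NUM_VDEVS_SKETCH)
#define NUM_TIDS_SKETCH		(NUM_PEERS_SKETCH * 2)

_Static_assert(NUM_PEERS_SKETCH == 48, "32 stations + 16 vdevs");
_Static_assert(NUM_TIDS_SKETCH == 96, "two TIDs per peer");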
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 218b6af63447..c9a7d5b5dffc 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1668,7 +1668,7 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
return 0;
}
-static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int num = 0;
@@ -1676,7 +1676,7 @@ static int ath10k_mac_ps_vif_count(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list)
- if (arvif->ps)
+ if (arvif->is_started)
num++;
return num;
@@ -1700,7 +1700,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
enable_ps = arvif->ps;
- if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+ if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
!test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
ar->fw_features)) {
ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
@@ -3034,38 +3034,16 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->htt.tx_lock);
- switch (pause_id) {
- case WMI_TLV_TX_PAUSE_ID_MCC:
- case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
- case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
- case WMI_TLV_TX_PAUSE_ID_AP_PS:
- case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
- switch (action) {
- case WMI_TLV_TX_PAUSE_ACTION_STOP:
- ath10k_mac_vif_tx_lock(arvif, pause_id);
- break;
- case WMI_TLV_TX_PAUSE_ACTION_WAKE:
- ath10k_mac_vif_tx_unlock(arvif, pause_id);
- break;
- default:
- ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
- action, arvif->vdev_id);
- break;
- }
+ switch (action) {
+ case WMI_TLV_TX_PAUSE_ACTION_STOP:
+ ath10k_mac_vif_tx_lock(arvif, pause_id);
+ break;
+ case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+ ath10k_mac_vif_tx_unlock(arvif, pause_id);
break;
- case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
- case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
- case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
- case WMI_TLV_TX_PAUSE_ID_HOST:
default:
- /* FIXME: Some pause_ids aren't vdev specific. Instead they
- * target peer_id and tid. Implementing these could improve
- * traffic scheduling fairness across multiple connected
- * stations in AP/IBSS modes.
- */
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac ignoring unsupported tx pause vdev %i id %d\n",
- arvif->vdev_id, pause_id);
+ ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+ action, arvif->vdev_id);
break;
}
}
@@ -3082,12 +3060,15 @@ static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_mac_tx_pause *arg = data;
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
}
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
- enum wmi_tlv_tx_pause_id pause_id,
- enum wmi_tlv_tx_pause_action action)
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
{
struct ath10k_mac_tx_pause arg = {
.vdev_id = vdev_id,
@@ -3449,14 +3430,13 @@ void __ath10k_scan_finish(struct ath10k *ar)
case ATH10K_SCAN_IDLE:
break;
case ATH10K_SCAN_RUNNING:
- if (ar->scan.is_roc)
- ieee80211_remain_on_channel_expired(ar->hw);
- /* fall through */
case ATH10K_SCAN_ABORTING:
if (!ar->scan.is_roc)
ieee80211_scan_completed(ar->hw,
(ar->scan.state ==
ATH10K_SCAN_ABORTING));
+ else if (ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
/* fall through */
case ATH10K_SCAN_STARTING:
ar->scan.state = ATH10K_SCAN_IDLE;
@@ -4641,9 +4621,6 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH10K_SCAN_ID;
- if (!req->no_cck)
- arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
-
if (req->ie_len) {
arg.ie_len = req->ie_len;
memcpy(arg.ie, req->ie, arg.ie_len);
@@ -5462,6 +5439,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ar->scan.is_roc = true;
ar->scan.vdev_id = arvif->vdev_id;
ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
ret = 0;
break;
case ATH10K_SCAN_STARTING:
@@ -5525,7 +5503,13 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
ath10k_scan_abort(ar);
+
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5566,7 +5550,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct ath10k *ar = hw->priv;
bool skip;
- int ret;
+ long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs */
@@ -5578,7 +5562,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (ar->state == ATH10K_STATE_WEDGED)
goto skip;
- ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
+ time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
@@ -5592,9 +5576,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
(empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
- if (ret <= 0 || skip)
- ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
- skip, ar->state, ret);
+ if (time_left == 0 || skip)
+ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+ skip, ar->state, time_left);
skip:
mutex_unlock(&ar->conf_mutex);
@@ -6219,6 +6203,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = true;
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
if (vif->type == NL80211_IFTYPE_MONITOR) {
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
if (ret) {
@@ -6236,6 +6227,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
err_stop:
ath10k_vdev_stop(arvif);
arvif->is_started = false;
+ ath10k_mac_vif_setup_ps(arvif);
err:
mutex_unlock(&ar->conf_mutex);
@@ -6565,8 +6557,11 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
{
.max = 2,
- .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
},
@@ -6576,6 +6571,26 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
},
};
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
{
.max = 1,
@@ -6594,7 +6609,7 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
{
.limits = ath10k_tlv_if_limit,
.num_different_channels = 1,
- .max_interfaces = 3,
+ .max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
},
{
@@ -6608,11 +6623,17 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
{
.limits = ath10k_tlv_if_limit,
- .num_different_channels = 2,
- .max_interfaces = 3,
+ .num_different_channels = 1,
+ .max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
},
{
+ .limits = ath10k_tlv_qcs_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+ },
+ {
.limits = ath10k_tlv_if_limit_ibss,
.num_different_channels = 1,
.max_interfaces = 2,
@@ -6620,6 +6641,33 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
},
};
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+ {
+ .limits = ath10k_10_4_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
+};
+
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
@@ -6902,6 +6950,8 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@@ -6941,6 +6991,11 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10x_if_comb);
break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10_4_if_comb);
+ break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index b291f063705c..e3cefe4c7cfd 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -61,9 +61,9 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
- enum wmi_tlv_tx_pause_id pause_id,
- enum wmi_tlv_tx_pause_action action);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action);
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ea656e011a96..5778e5277823 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -59,6 +59,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define QCA988X_2_0_DEVICE_ID (0x003c)
#define QCA6174_2_1_DEVICE_ID (0x003e)
+#define QCA99X0_2_0_DEVICE_ID (0x0040)
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
@@ -81,7 +82,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
-static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -90,6 +91,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@@ -155,6 +157,38 @@ static const struct ce_attr host_ce_config_wlan[] = {
.src_sz_max = DIAG_TRANSFER_LIMIT,
.dest_nentries = 2,
},
+
+ /* CE8: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+
+ /* CE9: target autonomous qcache memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
};
/* Target firmware's Copy Engine configuration. */
@@ -232,6 +266,38 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
},
/* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: target->host pktlog */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9: target autonomous qcache memcpy */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* It is not necessary to send the target wlan configuration for CE10 &
+ * CE11 as these CEs are not actively used in the target.
+ */
};
/*
@@ -479,6 +545,12 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
+ if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(value), ar_pci->mem_len);
+ return;
+ }
+
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +568,12 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
u32 val;
int ret;
+ if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(val), ar_pci->mem_len);
+ return 0;
+ }
+
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@@ -678,6 +756,26 @@ static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
ath10k_pci_rx_post(ar);
}
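+/* Convert a target CPU virtual address to the CE (diagnostic window)
+ * address space. Illustrative example with a hypothetical register value:
+ * on QCA988X/QCA6174, if the low 11 bits of CORE_CTRL read back as 0x005,
+ * the window base is 0x005 << 21 = 0x00a00000, so target address
+ * 0x0040abcd maps to 0x00a00000 | 0x100000 | 0x0abcd = 0x00b0abcd.
+ */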
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0;
+
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA6174:
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS) &
+ 0x7ff) << 21;
+ break;
+ case ATH10K_HW_QCA99X0:
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ break;
+ }
+
+ val |= 0x100000 | (addr & 0xfffff);
+ return val;
+}
+
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, when applicable, and single user
@@ -740,8 +838,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* convert it from Target CPU virtual address space
* to CE address space
*/
- address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
- address);
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
@@ -899,7 +996,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* to
* CE address space
*/
- address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
remaining_bytes = orig_nbytes;
ce_data = ce_data_base;
@@ -1331,20 +1428,42 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
- val &= ~CORE_CTRL_PCIE_REG_31_MASK;
-
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA6174:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to mask irq/MSI.
+ */
+ break;
+ }
}
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
- val |= CORE_CTRL_PCIE_REG_31_MASK;
-
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA6174:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val |= CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to unmask irq/MSI.
+ */
+ break;
+ }
}
static void ath10k_pci_irq_disable(struct ath10k *ar)
@@ -1506,7 +1625,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* masked. To prevent the device from asserting the interrupt reset it
* before proceeding with cleanup.
*/
- ath10k_pci_warm_reset(ar);
+ ath10k_pci_safe_chip_reset(ar);
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
@@ -1687,6 +1806,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
+ case QCA99X0_2_0_DEVICE_ID:
return 1;
case QCA6174_2_1_DEVICE_ID:
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
@@ -1757,7 +1877,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
target_ce_config_wlan,
- sizeof(target_ce_config_wlan));
+ sizeof(struct ce_pipe_config) *
+ NUM_TARGET_CE_CONFIG_WLAN);
if (ret != 0) {
ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@@ -1871,7 +1992,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
}
/* Last CE is Diagnostic Window */
- if (i == CE_COUNT - 1) {
+ if (i == CE_DIAG_PIPE) {
ar_pci->ce_diag = pipe->ce_hdl;
continue;
}
@@ -2016,6 +2137,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+ if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+ return ath10k_pci_warm_reset(ar);
+ } else if (QCA_REV_99X0(ar)) {
+ ath10k_pci_irq_disable(ar);
+ return ath10k_pci_qca99x0_chip_reset(ar);
+ } else {
+ return -ENOTSUPP;
+ }
+}
+
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
int i, ret;
@@ -2122,12 +2255,38 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+ return 0;
+}
+
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
if (QCA_REV_988X(ar))
return ath10k_pci_qca988x_chip_reset(ar);
else if (QCA_REV_6174(ar))
return ath10k_pci_qca6174_chip_reset(ar);
+ else if (QCA_REV_99X0(ar))
+ return ath10k_pci_qca99x0_chip_reset(ar);
else
return -ENOTSUPP;
}
@@ -2679,6 +2838,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
pci_set_master(pdev);
/* Arrange for access to Target SoC registers. */
+ ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
@@ -2745,6 +2905,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
break;
+ case QCA99X0_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA99X0;
+ break;
default:
WARN_ON(1);
return -ENOTSUPP;
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index d7696ddc03c4..8d364fb8f743 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -162,6 +162,7 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
+ size_t mem_len;
/*
* Number of MSI interrupts granted, 0 --> using legacy PCI line
@@ -236,18 +237,6 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4
-/*
- * TODO: Should be a function call specific to each Target-type.
- * This convoluted macro converts from Target CPU Virtual Address Space to CE
- * Address Space. As part of this process, we conservatively fetch the current
- * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
- * for this device; but that's not guaranteed.
- */
-#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
- (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
- CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
- 0x100000 | ((addr) & 0xfffff))
-
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 000000000000..3ca3fae408a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run the firmware binary with an even smaller IRAM size
+ * by using host memory to store some of the code segments.
+ */
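+
+/* In outline (as implemented below): ath10k_swap_code_seg_init() allocates
+ * a DMA-coherent host buffer and parses the firmware's swap binary into it,
+ * and ath10k_swap_code_seg_configure() later hands the resulting
+ * seg_hw_info descriptor to the target via ath10k_bmi_write_memory().
+ */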
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info,
+ const void *data, size_t data_len)
+{
+ u8 *virt_addr = seg_info->virt_address[0];
+ u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+ const u8 *fw_data = data;
+ union ath10k_swap_code_seg_item *swap_item;
+ u32 length = 0;
+ u32 payload_len;
+ u32 total_payload_len = 0;
+ u32 size_left = data_len;
+
+ /* Parse swap bin and copy the content to host allocated memory.
+ * The format is address, length and value. The last 4 bytes are the
+ * target write address. Currently the address field is not used.
+ */
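+ /* Illustrative stream layout (inferred from the structures in swap.h,
+ * not a normative format description):
+ *
+ *   [addr|len|data ...][addr|len|data ...] ... [12B zero magic|bmi addr]
+ *
+ * Regular items are struct ath10k_swap_code_seg_tlv; the stream is
+ * terminated by a struct ath10k_swap_code_seg_tail whose all-zero magic
+ * signature makes the overlaid tlv.length read as zero.
+ */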
+ seg_info->target_addr = -1;
+ while (size_left >= sizeof(*swap_item)) {
+ swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+ payload_len = __le32_to_cpu(swap_item->tlv.length);
+ if ((payload_len > size_left) ||
+ (payload_len == 0 &&
+ size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+ ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+ payload_len);
+ return -EINVAL;
+ }
+
+ if (payload_len == 0) {
+ if (memcmp(swap_item->tail.magic_signature, swap_magic,
+ ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+ ath10k_err(ar, "refusing an invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->target_addr =
+ __le32_to_cpu(swap_item->tail.bmi_write_addr);
+ break;
+ }
+
+ memcpy(virt_addr, swap_item->tlv.data, payload_len);
+ virt_addr += payload_len;
+ length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+ size_left -= length;
+ fw_data += length;
+ total_payload_len += payload_len;
+ }
+
+ if (seg_info->target_addr == -1) {
+ ath10k_err(ar, "failed to parse invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+ return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info)
+{
+ u32 seg_size;
+
+ if (!seg_info)
+ return;
+
+ if (!seg_info->virt_address[0])
+ return;
+
+ seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+ dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+ seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+ struct ath10k_swap_code_seg_info *seg_info;
+ void *virt_addr;
+ dma_addr_t paddr;
+
+ swap_bin_len = roundup(swap_bin_len, 2);
+ if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+ ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+ swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+ return NULL;
+ }
+
+ seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+ if (!seg_info)
+ return NULL;
+
+ virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+ GFP_KERNEL);
+ if (!virt_addr) {
+ ath10k_err(ar, "failed to allocate dma coherent memory\n");
+ return NULL;
+ }
+
+ seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+ seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.num_segs =
+ __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+ seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+ seg_info->virt_address[0] = virt_addr;
+ seg_info->paddr[0] = paddr;
+
+ return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ enum ath10k_swap_code_seg_bin_type type)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+ switch (type) {
+ case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
+ if (!ar->swap.firmware_swap_code_seg_info)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+ seg_info = ar->swap.firmware_swap_code_seg_info;
+ break;
+ default:
+ case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
+ case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
+ ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
+ type);
+ return 0;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+ &seg_info->seg_hw_info,
+ sizeof(seg_info->seg_hw_info));
+ if (ret) {
+ ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar)
+{
+ ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+ ar->swap.firmware_codeswap_data = NULL;
+ ar->swap.firmware_codeswap_len = 0;
+ ar->swap.firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info;
+
+ if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+ return 0;
+
+ seg_info = ath10k_swap_code_seg_alloc(ar,
+ ar->swap.firmware_codeswap_len);
+ if (!seg_info) {
+ ath10k_err(ar, "failed to allocate fw code swap segment\n");
+ return -ENOMEM;
+ }
+
+ ret = ath10k_swap_code_seg_fill(ar, seg_info,
+ ar->swap.firmware_codeswap_data,
+ ar->swap.firmware_codeswap_len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+ ret);
+ ath10k_swap_code_seg_free(ar, seg_info);
+ return ret;
+ }
+
+ ar->swap.firmware_swap_code_seg_info = seg_info;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644
index 000000000000..5c89952dd20f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
+
+struct ath10k_swap_code_seg_tlv {
+ __le32 address;
+ __le32 length;
+ u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+ u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+ __le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+ struct ath10k_swap_code_seg_tlv tlv;
+ struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+enum ath10k_swap_code_seg_bin_type {
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
+};
+
+struct ath10k_swap_code_seg_hw_info {
+ /* Swap binary image size */
+ __le32 swap_size;
+ __le32 num_segs;
+
+ /* Swap data size */
+ __le32 size;
+ __le32 size_log2;
+ __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+ __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+ struct ath10k_swap_code_seg_hw_info seg_hw_info;
+ void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+ u32 target_addr;
+ dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ enum ath10k_swap_code_seg_bin_type type);
+void ath10k_swap_code_seg_release(struct ath10k *ar);
+int ath10k_swap_code_seg_init(struct ath10k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index a417aae52623..768bef629099 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -450,4 +450,7 @@ Fw Mode/SubMode Mask
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
+#define QCA99X0_BOARD_DATA_SZ 12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 826500bb2b1b..6cf289158840 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -147,9 +147,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
const u8 *addr, bool expect_mapped)
{
- int ret;
+ long time_left;
- ret = wait_event_timeout(ar->peer_mapping_wq, ({
+ time_left = wait_event_timeout(ar->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ar->data_lock);
@@ -160,7 +160,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
}), 3*HZ);
- if (ret <= 0)
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8fdba3865c96..4189d4a90ce0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -377,12 +377,34 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
"wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
pause_id, action, vdev_map, peer_id, tid_map);
- for (vdev_id = 0; vdev_map; vdev_id++) {
- if (!(vdev_map & BIT(vdev_id)))
- continue;
-
- vdev_map &= ~BIT(vdev_id);
- ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+ switch (pause_id) {
+ case WMI_TLV_TX_PAUSE_ID_MCC:
+ case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PS:
+ case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+ for (vdev_id = 0; vdev_map; vdev_id++) {
+ if (!(vdev_map & BIT(vdev_id)))
+ continue;
+
+ vdev_map &= ~BIT(vdev_id);
+ ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+ action);
+ }
+ break;
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+ case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+ case WMI_TLV_TX_PAUSE_ID_HOST:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unsupported tx pause id %d\n",
+ pause_id);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unknown tx pause vdev %d\n",
+ pause_id);
+ break;
}
kfree(tb);
@@ -709,6 +731,8 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_swba_parse *swba = data;
+ struct wmi_tim_info_arg *tim_info_arg;
+ const struct wmi_tim_info *tim_info_ev = ptr;
if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
return -EPROTO;
@@ -716,7 +740,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
return -ENOBUFS;
- swba->arg->tim_info[swba->n_tim++] = ptr;
+ if (__le32_to_cpu(tim_info_ev->tim_len) >
+ sizeof(tim_info_ev->tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+ tim_info_arg->tim_len = tim_info_ev->tim_len;
+ tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+ tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+ tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+ tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+ swba->n_tim++;
+
return 0;
}
@@ -3151,6 +3189,38 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -3204,6 +3274,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -3262,6 +3374,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 6c046c244705..0791a4336e80 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -148,6 +148,48 @@ static struct wmi_cmd_map wmi_cmd_map = {
.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
};
/* 10.X WMI cmd track */
@@ -271,6 +313,48 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
};
/* 10.2.4 WMI cmd track */
@@ -393,6 +477,231 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+ .init_cmdid = WMI_10_4_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_4_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
+ .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
+ .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ .wlan_peer_caching_add_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ .wlan_peer_caching_evict_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ .wlan_peer_caching_restore_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ .wlan_peer_caching_print_all_peers_info_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ .peer_add_proxy_sta_entry_cmdid =
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+ .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+ .nan_cmdid = WMI_10_4_NAN_CMDID,
+ .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+ .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+ .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ .pdev_smart_ant_set_rx_antenna_cmdid =
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ .peer_smart_ant_set_tx_antenna_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ .peer_smart_ant_set_train_info_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ .peer_smart_ant_set_node_config_ops_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ .pdev_set_antenna_switch_table_cmdid =
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ .pdev_ratepwr_chainmsk_table_cmdid =
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+ .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+ .fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+ .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ .pdev_get_ani_ofdm_config_cmdid =
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+ .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+ .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+ .vdev_filter_neighbor_rx_packets_cmdid =
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+ .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
};
/* MAIN WMI VDEV param map */
@@ -452,6 +761,22 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -511,6 +836,22 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -569,6 +910,97 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+ .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10_4_VDEV_PARAM_WDS,
+ .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10_4_VDEV_PARAM_SGI,
+ .ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10_4_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_10_4_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ .early_rx_bmiss_sample_cycle =
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+ .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+ .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -621,6 +1053,48 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -674,6 +1148,48 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -727,6 +1243,48 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
/* firmware 10.2 specific mappings */
@@ -849,6 +1407,139 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_10_4_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ .smart_antenna_default_antenna =
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+ .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ .remove_mcast2ucast_buffer =
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ .peer_sta_ps_statechg_enable =
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+ .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
};
void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1232,6 +1923,8 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
return "completed [preempted]";
case WMI_SCAN_REASON_TIMEDOUT:
return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
case WMI_SCAN_REASON_MAX:
break;
}
@@ -1246,6 +1939,10 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
return "preempted";
case WMI_SCAN_EVENT_START_FAILED:
return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+ return "foreign channel exit";
default:
return "unknown";
}
@@ -1321,6 +2018,8 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_SCAN_EVENT_DEQUEUED:
case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
default:
break;
}
@@ -1433,6 +2132,40 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ struct wmi_10_4_mgmt_rx_event *ev;
+ struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+ size_t pull_len;
+ u32 msdu_len;
+
+ ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+ ev_hdr = &ev->hdr;
+ pull_len = sizeof(*ev);
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ /* Make sure bytes added for padding are removed. */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -1593,6 +2326,29 @@ static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+ arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ arg->rx_frame_count = ev->rx_frame_count;
+
+ return 0;
+}
+
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ch_info_ev_arg arg = {};
@@ -2149,33 +2905,42 @@ exit:
static void ath10k_wmi_update_tim(struct ath10k *ar,
struct ath10k_vif *arvif,
struct sk_buff *bcn,
- const struct wmi_tim_info *tim_info)
+ const struct wmi_tim_info_arg *tim_info)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
struct ieee80211_tim_ie *tim;
u8 *ies, *ie;
u8 ie_len, pvm_len;
__le32 t;
- u32 v;
+ u32 v, tim_len;
+
+	/* When FW reports 0 in tim_len, ensure at least the first byte
+	 * in tim_bitmap is considered for the pvm calculation.
+	 */
+ tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
/* if next SWBA has no tim_changed the tim_bitmap is garbage.
* we must copy the bitmap upon change and reuse it later */
if (__le32_to_cpu(tim_info->tim_changed)) {
int i;
- BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
- sizeof(tim_info->tim_bitmap));
+ if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+ ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
+ tim_len, sizeof(arvif->u.ap.tim_bitmap));
+ tim_len = sizeof(arvif->u.ap.tim_bitmap);
+ }
- for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+ for (i = 0; i < tim_len; i++) {
t = tim_info->tim_bitmap[i / 4];
v = __le32_to_cpu(t);
arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
}
- /* FW reports either length 0 or 16
- * so we calculate this on our own */
+		/* FW reports either length 0 or a length based on the max
+		 * number of supported stations, so we calculate this on our own.
+		 */
arvif->u.ap.tim_len = 0;
- for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+ for (i = 0; i < tim_len; i++)
if (arvif->u.ap.tim_bitmap[i])
arvif->u.ap.tim_len = i;
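The copy loop in the hunk above unpacks the little-endian 32-bit words of tim_info->tim_bitmap into bytes: byte i comes from word i/4, shifted down by (i % 4) * 8 bits. A minimal standalone sketch of the same unpacking, with hypothetical names and assuming a little-endian host for the __le32_to_cpu() stand-in:

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-in for __le32_to_cpu(); identity on little-endian. */
static uint32_t le32_to_host(uint32_t v) { return v; }

/* Unpack 'len' bytes of a TIM bitmap stored as little-endian u32 words. */
static void unpack_tim_bitmap(uint8_t *dst, const uint32_t *words, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		uint32_t v = le32_to_host(words[i / 4]);

		dst[i] = (v >> ((i % 4) * 8)) & 0xFF; /* byte i%4 of word i/4 */
	}
}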
@@ -2199,7 +2964,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
if (pvm_len < arvif->u.ap.tim_len) {
- int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+ int expand_size = tim_len - pvm_len;
int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
void *next_ie = ie + 2 + ie_len;
@@ -2214,7 +2979,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
}
}
- if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+ if (pvm_len > tim_len) {
ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
return;
}
@@ -2278,7 +3043,21 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
break;
- arg->tim_info[i] = &ev->bcn_info[i].tim_info;
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
i++;
}
@@ -2286,12 +3065,69 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+ u32 map, tim_len;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+		/* If this happens, firmware has changed and ath10k should
+		 * update the max size of the tim_info array.
+		 */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+ if (tim_len) {
+			/* Exclude the 4-byte guard length. */
+ tim_len -= 4;
+ arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+ } else {
+ arg->tim_info[i].tim_len = 0;
+ }
+
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+		/* 10.4 firmware doesn't have P2P support, so the Notice of
+		 * Absence info can be ignored for now.
+		 */
+
+ i++;
+ }
+
+ return 0;
+}
+
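Both pull_swba variants walk the vdev_map bitmask the same way: shift right once per vdev and consume a tim_info slot only when bit 0 is set, so slot i always corresponds to the i-th set bit. A small self-contained sketch of that idiom (the names and example map are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t map = 0x15; /* example: beacons pending on vdevs 0, 2 and 4 */
	int vdev_id, slot = 0;

	for (vdev_id = 0; map; map >>= 1, vdev_id++) {
		if (!(map & 1))
			continue; /* no beacon due for this vdev */

		printf("vdev %d -> tim_info[%d]\n", vdev_id, slot);
		slot++;
	}
	return 0;
}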
void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_swba_ev_arg arg = {};
u32 map;
int i = -1;
- const struct wmi_tim_info *tim_info;
+ const struct wmi_tim_info_arg *tim_info;
const struct wmi_p2p_noa_info *noa_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
@@ -2320,7 +3156,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
break;
}
- tim_info = arg.tim_info[i];
+ tim_info = &arg.tim_info[i];
noa_info = arg.noa_info[i];
ath10k_dbg(ar, ATH10K_DBG_MGMT,
@@ -2335,6 +3171,10 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(tim_info->tim_bitmap[1]),
__le32_to_cpu(tim_info->tim_bitmap[0]));
+		/* TODO: Only the first 4 words of tim_bitmap are dumped.
+		 * Extend the debug code to dump the full tim_bitmap.
+		 */
+
arvif = ath10k_get_arvif(ar, vdev_id);
if (arvif == NULL) {
ath10k_warn(ar, "no vif for vdev_id %d found\n",
@@ -3075,10 +3915,10 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
if (ar->fw_api == 1 && ar->fw_version_build > 636)
set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
- if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
- ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
- ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ ar->num_rf_chains, ar->max_spatial_stream);
+ ar->num_rf_chains = ar->max_spatial_stream;
}
ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
@@ -3101,20 +3941,39 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+ ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+ TARGET_10_4_NUM_VDEVS;
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ TARGET_10_4_NUM_VDEVS;
+ ar->num_tids = ar->num_active_peers * 2;
+ ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+ }
+
+	/* TODO: Adjust the max peer count for cases such as
+	 * WMI_SERVICE_RATECTRL_CACHE and WMI_SERVICE_IRAM_TIDS.
+	 */
+
for (i = 0; i < num_mem_reqs; ++i) {
req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
- if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+ if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+ if (ar->num_active_peers)
+ num_units = ar->num_active_peers + 1;
+ else
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
/* number of units to allocate is number of
* peers, 1 extra for self peer on target */
/* this needs to be tied, host and target
* can get out of sync */
- num_units = TARGET_10X_NUM_PEERS + 1;
- else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
- num_units = TARGET_10X_NUM_VDEVS + 1;
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+ num_units = ar->max_num_vdevs + 1;
+ }
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
@@ -3576,6 +4435,73 @@ out:
dev_kfree_skb(skb);
}
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10_4_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_10_4_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10_4_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_10_4_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_10_4_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_10_4_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10_4_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10_4_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_10_4_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_10_4_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received event id %d not implemented\n", id);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
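The id extraction at the top of this handler uses the driver's MS() mask-and-shift helper on the wmi_cmd_hdr word. The exact mask and shift live elsewhere in the driver; a self-contained sketch of the extraction pattern follows, with the mask/LSB values shown only as plausible placeholders:

#include <stdint.h>

/* Generic mask-and-shift field extractor, in the style of ath10k's MS(). */
#define EXTRACT_FIELD(v, mask, lsb)	(((v) & (mask)) >> (lsb))

/* Placeholder values; the real WMI_CMD_HDR_CMD_ID mask/LSB are in wmi.h. */
#define CMD_ID_MASK	0x00ffffff
#define CMD_ID_LSB	0

static uint32_t extract_cmd_id(uint32_t hdr_word)
{
	return EXTRACT_FIELD(hdr_word, CMD_ID_MASK, CMD_ID_LSB);
}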
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
int ret;
@@ -3950,6 +4876,88 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
return buf;
}
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10_4 *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10_4 config = {};
+ u32 len;
+
+ config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+ config.num_peers = __cpu_to_le32(ar->max_num_peers);
+ config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+ config.num_tids = __cpu_to_le32(ar->num_tids);
+
+ config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+ config.num_offload_reorder_buffs =
+ __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+
+ config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+ config.rx_decap_mode = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+ config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+ config.rx_skip_defrag_timeout_dup_detection_check =
+ __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+ config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+ config.max_peer_ext_stats =
+ __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+ config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+ config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+ config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+ config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+ config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+ config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+ config.tt_support =
+ __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+ config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+ config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+ config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+ return buf;
+}
+
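Note that .gen_init only builds and returns the sk_buff; sending it is left to the wmi-ops wrapper layer. A hedged sketch of how such an op is typically consumed, modeled on the wrapper style used elsewhere in ath10k (the _sketch name is hypothetical, not part of this patch):

/* Sketch: dispatch gen_init through the per-version ops and cmd maps. */
static inline int ath10k_wmi_cmd_init_sketch(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* init_cmdid resolves via the per-version map, e.g. wmi_10_4_cmd_map */
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}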
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
if (arg->ie_len && !arg->ie)
@@ -4172,7 +5180,6 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
| WMI_SCAN_EVENT_BSS_CHANNEL
| WMI_SCAN_EVENT_FOREIGN_CHANNEL
| WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->n_bssids = 1;
arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
@@ -5412,9 +6419,64 @@ static const struct wmi_ops wmi_10_2_4_ops = {
/* .gen_adaptive_qcs not implemented */
};
+static const struct wmi_ops wmi_10_4_ops = {
+ .rx = ath10k_wmi_10_4_op_rx,
+ .map_svc = wmi_10_4_svc_map,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_10_4_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+
+ /* shared with 10.2 */
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+};
+
int ath10k_wmi_attach(struct ath10k *ar)
{
switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->wmi.ops = &wmi_10_4_ops;
+ ar->wmi.cmd = &wmi_10_4_cmd_map;
+ ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ break;
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
ar->wmi.cmd = &wmi_10_2_4_cmd_map;
ar->wmi.ops = &wmi_10_2_4_ops;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index cf44a3d080a3..0d4efc9c5796 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -150,6 +150,12 @@ enum wmi_service {
WMI_SERVICE_SAP_AUTH_OFFLOAD,
WMI_SERVICE_ATF,
WMI_SERVICE_COEX_GPIO,
+ WMI_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_TT,
+ WMI_SERVICE_PEER_CACHING,
+ WMI_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64,
/* keep last */
WMI_SERVICE_MAX,
@@ -218,6 +224,51 @@ enum wmi_main_service {
WMI_MAIN_SERVICE_TX_ENCAP,
};
+enum wmi_10_4_service {
+ WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_10_4_SERVICE_AP_DFS,
+ WMI_10_4_SERVICE_11AC,
+ WMI_10_4_SERVICE_BLOCKACK,
+ WMI_10_4_SERVICE_PHYERR,
+ WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_10_4_SERVICE_RTT,
+ WMI_10_4_SERVICE_RATECTRL,
+ WMI_10_4_SERVICE_WOW,
+ WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_10_4_SERVICE_BURST,
+ WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_10_4_SERVICE_CHATTER,
+ WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10_4_SERVICE_GPIO,
+ WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_10_4_SERVICE_EARLY_RX,
+ WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_10_4_SERVICE_TT,
+ WMI_10_4_SERVICE_ATF,
+ WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+};
+
static inline char *wmi_service_name(int service_id)
{
#define SVCSTR(x) case x: return #x
@@ -299,6 +350,12 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
SVCSTR(WMI_SERVICE_ATF);
SVCSTR(WMI_SERVICE_COEX_GPIO);
+ SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+ SVCSTR(WMI_SERVICE_TT);
+ SVCSTR(WMI_SERVICE_PEER_CACHING);
+ SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+ SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+ SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
default:
return NULL;
}
@@ -437,6 +494,95 @@ static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TX_ENCAP, len);
}
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_10_4_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_10_4_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_10_4_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_10_4_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_10_4_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_ENHANCED_PROXY_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TT,
+ WMI_SERVICE_TT, len);
+ SVCMAP(WMI_10_4_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_SERVICE_PEER_CACHING, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+}
+
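wmi_10_4_svc_map() leans on the SVCMAP() helper defined earlier in this header (and undefined just below) to translate firmware service bits into the unified WMI_SERVICE_* bitmap. Its definition is outside this hunk; a minimal sketch of the kind of bounded bit test it is assumed to perform:

/* Assumed shape of SVCMAP(): if firmware service bit 'x' is set in the
 * __le32 array 'in' (which carries 'len' service bits), mark the unified
 * service 'y' in the host bitmap 'out'.
 */
#define SVCMAP_SKETCH(x, y, len) \
	do { \
		if ((x) < (len) && \
		    (__le32_to_cpu(in[(x) / 32]) & BIT((x) % 32))) \
			__set_bit(y, out); \
	} while (0)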
#undef SVCMAP
/* 2 word representation of MAC addr */
@@ -565,6 +711,48 @@ struct wmi_cmd_map {
u32 tdls_set_state_cmdid;
u32 tdls_peer_update_cmdid;
u32 adaptive_qcs_cmdid;
+ u32 scan_update_request_cmdid;
+ u32 vdev_standby_response_cmdid;
+ u32 vdev_resume_response_cmdid;
+ u32 wlan_peer_caching_add_peer_cmdid;
+ u32 wlan_peer_caching_evict_peer_cmdid;
+ u32 wlan_peer_caching_restore_peer_cmdid;
+ u32 wlan_peer_caching_print_all_peers_info_cmdid;
+ u32 peer_update_wds_entry_cmdid;
+ u32 peer_add_proxy_sta_entry_cmdid;
+ u32 rtt_keepalive_cmdid;
+ u32 oem_req_cmdid;
+ u32 nan_cmdid;
+ u32 vdev_ratemask_cmdid;
+ u32 qboost_cfg_cmdid;
+ u32 pdev_smart_ant_enable_cmdid;
+ u32 pdev_smart_ant_set_rx_antenna_cmdid;
+ u32 peer_smart_ant_set_tx_antenna_cmdid;
+ u32 peer_smart_ant_set_train_info_cmdid;
+ u32 peer_smart_ant_set_node_config_ops_cmdid;
+ u32 pdev_set_antenna_switch_table_cmdid;
+ u32 pdev_set_ctl_table_cmdid;
+ u32 pdev_set_mimogain_table_cmdid;
+ u32 pdev_ratepwr_table_cmdid;
+ u32 pdev_ratepwr_chainmsk_table_cmdid;
+ u32 pdev_fips_cmdid;
+ u32 tt_set_conf_cmdid;
+ u32 fwtest_cmdid;
+ u32 vdev_atf_request_cmdid;
+ u32 peer_atf_request_cmdid;
+ u32 pdev_get_ani_cck_config_cmdid;
+ u32 pdev_get_ani_ofdm_config_cmdid;
+ u32 pdev_reserve_ast_entry_cmdid;
+ u32 pdev_get_nfcal_power_cmdid;
+ u32 pdev_get_tpc_cmdid;
+ u32 pdev_get_ast_info_cmdid;
+ u32 vdev_set_dscp_tid_map_cmdid;
+ u32 pdev_get_info_cmdid;
+ u32 vdev_get_info_cmdid;
+ u32 vdev_filter_neighbor_rx_packets_cmdid;
+ u32 mu_cal_start_cmdid;
+ u32 set_cca_params_cmdid;
+ u32 pdev_bss_chan_info_request_cmdid;
};
/*
@@ -1220,6 +1408,216 @@ enum wmi_10_2_event_id {
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
+enum wmi_10_4_cmd_id {
+ WMI_10_4_START_CMDID = 0x9000,
+ WMI_10_4_END_CMDID = 0x9FFF,
+ WMI_10_4_INIT_CMDID,
+ WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+ WMI_10_4_STOP_SCAN_CMDID,
+ WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_10_4_ECHO_CMDID,
+ WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ WMI_10_4_PDEV_SET_PARAM_CMDID,
+ WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_10_4_VDEV_CREATE_CMDID,
+ WMI_10_4_VDEV_DELETE_CMDID,
+ WMI_10_4_VDEV_START_REQUEST_CMDID,
+ WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10_4_VDEV_UP_CMDID,
+ WMI_10_4_VDEV_STOP_CMDID,
+ WMI_10_4_VDEV_DOWN_CMDID,
+ WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10_4_VDEV_SET_PARAM_CMDID,
+ WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ WMI_10_4_PEER_CREATE_CMDID,
+ WMI_10_4_PEER_DELETE_CMDID,
+ WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ WMI_10_4_PEER_SET_PARAM_CMDID,
+ WMI_10_4_PEER_ASSOC_CMDID,
+ WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ WMI_10_4_BCN_TX_CMDID,
+ WMI_10_4_PDEV_SEND_BCN_CMDID,
+ WMI_10_4_BCN_PRB_TMPL_CMDID,
+ WMI_10_4_BCN_FILTER_RX_CMDID,
+ WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10_4_MGMT_TX_CMDID,
+ WMI_10_4_PRB_TMPL_CMDID,
+ WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10_4_ADDBA_SEND_CMDID,
+ WMI_10_4_ADDBA_STATUS_CMDID,
+ WMI_10_4_DELBA_SEND_CMDID,
+ WMI_10_4_ADDBA_SET_RESP_CMDID,
+ WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ WMI_10_4_DBGLOG_CFG_CMDID,
+ WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ WMI_10_4_PDEV_QVIT_CMDID,
+ WMI_10_4_ROAM_SCAN_MODE,
+ WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10_4_ROAM_SCAN_PERIOD,
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10_4_ROAM_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_PERIOD,
+ WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10_4_P2P_GO_SET_BEACON_IE,
+ WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_10_4_PDEV_SUSPEND_CMDID,
+ WMI_10_4_PDEV_RESUME_CMDID,
+ WMI_10_4_ADD_BCN_FILTER_CMDID,
+ WMI_10_4_RMV_BCN_FILTER_CMDID,
+ WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10_4_WOW_ENABLE_CMDID,
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_10_4_RTT_MEASREQ_CMDID,
+ WMI_10_4_RTT_TSF_CMDID,
+ WMI_10_4_RTT_KEEPALIVE_CMDID,
+ WMI_10_4_OEM_REQ_CMDID,
+ WMI_10_4_NAN_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10_4_REQUEST_STATS_CMDID,
+ WMI_10_4_GPIO_CONFIG_CMDID,
+ WMI_10_4_GPIO_OUTPUT_CMDID,
+ WMI_10_4_VDEV_RATEMASK_CMDID,
+ WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ WMI_10_4_GTK_OFFLOAD_CMDID,
+ WMI_10_4_QBOOST_CFG_CMDID,
+ WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_10_4_FORCE_FW_HANG_CMDID,
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_4_PDEV_FIPS_CMDID,
+ WMI_10_4_TT_SET_CONF_CMDID,
+ WMI_10_4_FWTEST_CMDID,
+ WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CMDID,
+ WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_10_4_PDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_10_4_MU_CAL_START_CMDID,
+ WMI_10_4_SET_CCA_PARAMS_CMDID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+ WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10_4_READY_EVENTID,
+ WMI_10_4_DEBUG_MESG_EVENTID,
+ WMI_10_4_START_EVENTID = 0x9000,
+ WMI_10_4_END_EVENTID = 0x9FFF,
+ WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+ WMI_10_4_ECHO_EVENTID,
+ WMI_10_4_UPDATE_STATS_EVENTID,
+ WMI_10_4_INST_RSSI_STATS_EVENTID,
+ WMI_10_4_VDEV_START_RESP_EVENTID,
+ WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+ WMI_10_4_VDEV_STOPPED_EVENTID,
+ WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+ WMI_10_4_HOST_SWBA_EVENTID,
+ WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10_4_MGMT_RX_EVENTID,
+ WMI_10_4_CHAN_INFO_EVENTID,
+ WMI_10_4_PHYERR_EVENTID,
+ WMI_10_4_ROAM_EVENTID,
+ WMI_10_4_PROFILE_MATCH,
+ WMI_10_4_DEBUG_PRINT_EVENTID,
+ WMI_10_4_PDEV_QVIT_EVENTID,
+ WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+ WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+ WMI_10_4_RTT_KEEPALIVE_EVENTID,
+ WMI_10_4_OEM_CAPABILITY_EVENTID,
+ WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+ WMI_10_4_NAN_EVENTID,
+ WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+ WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+ WMI_10_4_DCS_INTERFERENCE_EVENTID,
+ WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+ WMI_10_4_CSA_HANDLING_EVENTID,
+ WMI_10_4_GPIO_INPUT_EVENTID,
+ WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+ WMI_10_4_GENERIC_BUFFER_EVENTID,
+ WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+ WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_10_4_WDS_PEER_EVENTID,
+ WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_4_PDEV_FIPS_EVENTID,
+ WMI_10_4_TT_STATS_EVENTID,
+ WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+ WMI_10_4_PDEV_TPC_EVENTID,
+ WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+ WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
enum wmi_phy_mode {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
@@ -1349,7 +1747,8 @@ enum wmi_channel_change_cause {
/* Indicate reason for channel switch */
#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
-#define WMI_MAX_SPATIAL_STREAM 3
+#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
+#define WMI_10_4_MAX_SPATIAL_STREAM 4
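These defines pair with the earlier service-ready hunk, which swapped the fixed WMI_MAX_SPATIAL_STREAM limit for ar->max_spatial_stream so each op version can pick its own ceiling. A plausible sketch of the attach-time selection (the helper name and exact placement are assumed, not shown in this hunk):

/* Sketch: pick the per-version spatial stream ceiling at attach time. */
static void ath10k_wmi_set_max_ss_sketch(struct ath10k *ar)
{
	switch (ar->wmi.op_version) {
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
		break;
	default:
		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
		break;
	}
}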
/* HT Capabilities*/
#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */
@@ -1979,8 +2378,224 @@ struct wmi_resource_config_10_2 {
__le32 feature_mask;
} __packed;
-#define NUM_UNITS_IS_NUM_VDEVS 0x1
-#define NUM_UNITS_IS_NUM_PEERS 0x2
+#define NUM_UNITS_IS_NUM_VDEVS BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS BIT(2)
+
+struct wmi_resource_config_10_4 {
+ /* Number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* Number of peer nodes to support */
+ __le32 num_peers;
+
+ /* Number of active peer nodes to support */
+ __le32 num_active_peers;
+
+	/* In offload mode, the target supports features like WOW, chatter and
+	 * other protocol offloads. To support them, some functionality such as
+	 * reorder buffering and PN checking needs to be done on the target.
+	 * This determines the maximum number of peers supported by the target
+	 * in offload mode.
+	 */
+ __le32 num_offload_peers;
+
+	/* Number of reorder buffers available for doing target-based
+	 * rx reorder buffering
+	 */
+ __le32 num_offload_reorder_buffs;
+
+ /* Number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* Total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /* Max skid for resolving hash collisions.
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /* The nominal chain mask for transmit.
+ * The chain mask may be modified dynamically, e.g. to operate AP tx
+ * with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /* The nominal chain mask for receive.
+ * The chain mask may be modified dynamically, e.g. for a client to use
+ * a reduced number of chains for receive if the traffic to the client
+ * is low enough that it doesn't require downlink MIMO or antenna
+ * diversity. This configuration parameter specifies the nominal
+ * chain-mask that should be used when not operating with a reduced
+ * set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /* What rx reorder timeout (ms) to use for the AC.
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+ * been received. This parameter specifies the timeout in milliseconds
+ * for each class.
+ */
+ __le32 rx_timeout_pri[4];
+
+	/* What mode the rx should decap packets to.
+	 * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+	 * This setting also determines the default TX behavior; however, TX
+	 * behavior can be modified on a per-VAP basis during VAP init.
+	 */
+ __le32 rx_decap_mode;
+
+ __le32 scan_max_pending_req;
+
+ __le32 bmiss_offload_max_vdev;
+
+ __le32 roam_offload_max_vdev;
+
+ __le32 roam_offload_max_ap_profiles;
+
+ /* How many groups to use for mcast->ucast conversion.
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each peer
+ * within the multicast group. This num_mcast_groups configuration
+ * parameter tells the target how many multicast groups to provide
+ * storage for within its multicast group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /* Size to allocate for the mcast membership table.
+ * This num_mcast_table_elems configuration parameter tells the target
+ * how many peer elements it needs to provide storage for in its
+ * multicast group membership table. These multicast group membership
+ * table elements are shared by the multicast groups stored within
+ * the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /* Whether/how to do multicast->unicast conversion.
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group membership
+ * table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, drop the frame
+ * 2 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, transmit the
+ * frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /* How much memory to allocate for a tx PPDU dbg log.
+ * This parameter controls how much memory the target will allocate to
+ * store a log of tx PPDU meta-information (how large the PPDU was,
+ * when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* How many AST entries to allocate for WDS */
+ __le32 num_wds_entries;
+
+ /* MAC DMA burst size: 0 - default, 1 - 256B */
+ __le32 dma_burst_size;
+
+ /* Fixed delimiters to be inserted after every MPDU to account for
+ * interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /* Determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+ * is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /* Configuration for VoW: number of video nodes to be supported and
+ * max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Maximum vdev that could use gtk offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /* Max number of tx fragments per MSDU.
+ * This parameter controls the max number of tx fragments per MSDU.
+ * This will be passed by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+
+ /* Max number of extended peer stats.
+ * This parameter controls the max number of peers for which extended
+ * statistics are supported by the target.
+ */
+ __le32 max_peer_ext_stats;
+
+ /* Smart antenna capabilities information.
+ * 1 - Smart antenna is enabled
+ * 0 - Smart antenna is disabled
+ * In future this can contain smart antenna specific capabilities.
+ */
+ __le32 smart_ant_cap;
+
+ /* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+ * during init.
+ */
+ __le32 bk_minfree;
+ __le32 be_minfree;
+ __le32 vi_minfree;
+ __le32 vo_minfree;
+
+ /* Rx batch mode capability.
+ * 1 - Rx batch mode enabled
+ * 0 - Rx batch mode disabled
+ */
+ __le32 rx_batchmode;
+
+ /* Thermal throttling capability.
+ * 1 - Capable of thermal throttling
+ * 0 - Not capable of thermal throttling
+ */
+ __le32 tt_support;
+
+ /* ATF configuration.
+ * 1 - Enable ATF
+ * 0 - Disable ATF
+ */
+ __le32 atf_config;
+
+ /* Configure padding to manage IP header misalignment
+ * 1 - Enable padding
+ * 0 - Disable padding
+ */
+ __le32 iphdr_pad_config;
+
+ /* qwrap configuration
+ * 1 - This is qwrap configuration
+ * 0 - This is not qwrap
+ */
+ __le32 qwrap_config;
+} __packed;
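/* Illustrative sketch, not part of this patch: populating a few of the
 * 10.4 resource-config fields. All numeric values are placeholders;
 * the driver's real defaults come from its target configuration.
 */
static void example_fill_config_10_4(struct wmi_resource_config_10_4 *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->num_vdevs = __cpu_to_le32(16);
	cfg->num_peers = __cpu_to_le32(512);
	cfg->num_tids = __cpu_to_le32(256);
	cfg->mcast2ucast_mode = __cpu_to_le32(0); /* no mc->uc conversion */
	cfg->rx_batchmode = __cpu_to_le32(1);	  /* rx batch mode enabled */
}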
/* structure describing host memory chunk. */
struct host_memory_chunk {
@@ -2014,6 +2629,11 @@ struct wmi_init_cmd_10_2 {
struct wmi_host_mem_chunks mem_chunks;
} __packed;
+struct wmi_init_cmd_10_4 {
+ struct wmi_resource_config_10_4 resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
struct wmi_chan_list_entry {
__le16 freq;
u8 phy_mode; /* valid for 10.2 only */
@@ -2260,15 +2880,17 @@ enum wmi_bss_filter {
};
enum wmi_scan_event_type {
- WMI_SCAN_EVENT_STARTED = 0x1,
- WMI_SCAN_EVENT_COMPLETED = 0x2,
- WMI_SCAN_EVENT_BSS_CHANNEL = 0x4,
- WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
- WMI_SCAN_EVENT_DEQUEUED = 0x10,
- WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */
- WMI_SCAN_EVENT_START_FAILED = 0x40,
- WMI_SCAN_EVENT_RESTARTED = 0x80,
- WMI_SCAN_EVENT_MAX = 0x8000
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+ WMI_SCAN_EVENT_MAX = BIT(15),
};
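/* Illustrative sketch, not part of this patch: the scan event field is
 * a bitmask, so several events can arrive in one report and each bit is
 * tested individually (hypothetical handler).
 */
static void example_handle_scan_events(u32 events)
{
	if (events & WMI_SCAN_EVENT_STARTED) {
		/* scan engine started */
	}
	if (events & WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT) {
		/* left an off-channel visit; bit newly added here */
	}
	if (events & WMI_SCAN_EVENT_COMPLETED) {
		/* completion reason arrives as wmi_scan_completion_reason */
	}
}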
enum wmi_scan_completion_reason {
@@ -2276,6 +2898,7 @@ enum wmi_scan_completion_reason {
WMI_SCAN_REASON_CANCELLED,
WMI_SCAN_REASON_PREEMPTED,
WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
WMI_SCAN_REASON_MAX,
};
@@ -2329,6 +2952,21 @@ struct wmi_mgmt_rx_event_v2 {
u8 buf[0];
} __packed;
+struct wmi_10_4_mgmt_rx_hdr {
+ __le32 channel;
+ __le32 snr;
+ u8 rssi_ctl[4];
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+ struct wmi_10_4_mgmt_rx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
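/* Illustrative sketch, not part of this patch: stripping the 10.4
 * management-rx header off the event buffer so skb->data points at the
 * raw frame (hypothetical helper, validation omitted).
 */
static void example_pull_mgmt_rx_10_4(struct sk_buff *skb)
{
	struct wmi_10_4_mgmt_rx_event *ev = (void *)skb->data;
	u32 buf_len = __le32_to_cpu(ev->hdr.buf_len);

	skb_pull(skb, sizeof(ev->hdr));
	skb_trim(skb, buf_len);	/* keep only the reported frame bytes */
}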
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
@@ -2613,6 +3251,48 @@ struct wmi_pdev_param_map {
u32 burst_dur;
u32 burst_enable;
u32 cal_period;
+ u32 aggr_burst;
+ u32 rx_decap_mode;
+ u32 smart_antenna_default_antenna;
+ u32 igmpmld_override;
+ u32 igmpmld_tid;
+ u32 antenna_gain;
+ u32 rx_filter;
+ u32 set_mcast_to_ucast_tid;
+ u32 proxy_sta_mode;
+ u32 set_mcast2ucast_mode;
+ u32 set_mcast2ucast_buffer;
+ u32 remove_mcast2ucast_buffer;
+ u32 peer_sta_ps_statechg_enable;
+ u32 igmpmld_ac_override;
+ u32 block_interbss;
+ u32 set_disable_reset_cmdid;
+ u32 set_msdu_ttl_cmdid;
+ u32 set_ppdu_duration_cmdid;
+ u32 txbf_sound_period_cmdid;
+ u32 set_promisc_mode_cmdid;
+ u32 set_burst_mode_cmdid;
+ u32 en_stats;
+ u32 mu_group_policy;
+ u32 noise_detection;
+ u32 noise_threshold;
+ u32 dpd_enable;
+ u32 set_mcast_bcast_echo;
+ u32 atf_strict_sch;
+ u32 atf_sched_duration;
+ u32 ant_plzn;
+ u32 mgmt_retry_limit;
+ u32 sensitivity_level;
+ u32 signed_txpower_2g;
+ u32 signed_txpower_5g;
+ u32 enable_per_tid_amsdu;
+ u32 enable_per_tid_ampdu;
+ u32 cca_threshold;
+ u32 rts_fixed_rate;
+ u32 pdev_reset;
+ u32 wapi_mbssid_offset;
+ u32 arp_srcaddr;
+ u32 arp_dstaddr;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
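/* Illustrative sketch, not part of this patch: param-map entries left
 * at 0 (WMI_PDEV_PARAM_UNSUPPORTED) mark parameters a firmware branch
 * does not implement, so a set-param helper can reject them up front
 * (hypothetical helper, command construction elided).
 */
static int example_pdev_set_param(u32 mapped_id, u32 value)
{
	if (mapped_id == WMI_PDEV_PARAM_UNSUPPORTED)
		return -EOPNOTSUPP;
	/* ...build and send the pdev set-param WMI command... */
	return 0;
}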
@@ -2828,6 +3508,100 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_CAL_PERIOD
};
+enum wmi_10_4_pdev_param {
+ WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PMF_QOS,
+ WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_DCS,
+ WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ WMI_10_4_PDEV_PARAM_PROXY_STA,
+ WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_10_4_PDEV_PARAM_BURST_DUR,
+ WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_10_4_PDEV_PARAM_RX_FILTER,
+ WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_EN_STATS,
+ WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
struct wmi_pdev_set_param_cmd {
__le32 param_id;
__le32 param_value;
@@ -3506,6 +4280,22 @@ struct wmi_vdev_param_map {
u32 drop_unencry;
u32 tx_encap_type;
u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+ u32 rc_num_retries;
+ u32 cabq_maxdur;
+ u32 mfptest_set;
+ u32 rts_fixed_rate;
+ u32 vht_sgimask;
+ u32 vht80_ratemask;
+ u32 early_rx_adjust_enable;
+ u32 early_rx_tgt_bmiss_num;
+ u32 early_rx_bmiss_sample_cycle;
+ u32 early_rx_slop_step;
+ u32 early_rx_init_slop;
+ u32 early_rx_adjust_pause;
+ u32 proxy_sta;
+ u32 meru_vc;
+ u32 rx_decap_type;
+ u32 bw_nss_ratemask;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -3764,6 +4554,75 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
};
+enum wmi_10_4_vdev_param {
+ WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ WMI_10_4_VDEV_PARAM_PREAMBLE,
+ WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_10_4_VDEV_PARAM_WDS,
+ WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ WMI_10_4_VDEV_PARAM_CHWIDTH,
+ WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_SGI,
+ WMI_10_4_VDEV_PARAM_LDPC,
+ WMI_10_4_VDEV_PARAM_TX_STBC,
+ WMI_10_4_VDEV_PARAM_RX_STBC,
+ WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ WMI_10_4_VDEV_PARAM_NSS,
+ WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_10_4_VDEV_PARAM_TXBF,
+ WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_10_4_VDEV_PARAM_PROXY_STA,
+ WMI_10_4_VDEV_PARAM_MERU_VC,
+ WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
@@ -4305,6 +5164,14 @@ struct wmi_tim_info {
__le32 tim_num_ps_pending;
} __packed;
+struct wmi_tim_info_arg {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ const __le32 *tim_bitmap;
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
/* Maximum number of NOA Descriptors supported */
#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
#define WMI_P2P_OPPPS_ENABLE_BIT BIT(0)
@@ -4336,6 +5203,47 @@ struct wmi_host_swba_event {
struct wmi_bcn_info bcn_info[0];
} __packed;
+/* 16 words cover 512 clients, plus 1 guard word */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
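/* Illustrative sketch, not part of this patch: 16 payload words of 32
 * bits cover 512 stations, so AID n maps to word n / 32, bit n % 32,
 * with the 17th word acting as the guard slot (valid for aid < 512).
 */
static bool example_tim_bit_set(const struct wmi_10_4_tim_info *tim, u16 aid)
{
	u32 word = __le32_to_cpu(tim->tim_bitmap[aid / 32]);

	return !!(word & BIT(aid % 32));
}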
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ * Bits 1-7 - Ctwindow in TUs
+ */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor
+ noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
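/* Illustrative sketch, not part of this patch: unpacking the two fields
 * that share the ctwindow_oppps octet, per the layout described above.
 */
static void example_parse_oppps(const struct wmi_10_4_p2p_noa_info *noa,
				bool *oppps, u8 *ctwindow)
{
	*oppps = !!(noa->ctwindow_oppps & BIT(0));	/* Opp PS state */
	*ctwindow = noa->ctwindow_oppps >> 1;		/* CTWindow in TUs */
}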
+struct wmi_10_4_bcn_info {
+ struct wmi_10_4_tim_info tim_info;
+ struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
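/* Illustrative sketch, not part of this patch: bcn_info[] holds one
 * entry per bit set in vdev_map, in ascending vdev order, so the
 * entries are consumed while scanning the map.
 */
static void example_walk_swba(const struct wmi_10_4_host_swba_event *ev)
{
	u32 map = __le32_to_cpu(ev->vdev_map);
	int vdev_id, i = 0;

	for (vdev_id = 0; map; vdev_id++, map >>= 1) {
		if (!(map & 1))
			continue;
		/* vdev vdev_id uses ev->bcn_info[i] */
		i++;
	}
}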
#define WMI_MAX_AP_VDEV 16
struct wmi_tbtt_offset_event {
@@ -4660,6 +5568,18 @@ struct wmi_chan_info_event {
__le32 cycle_count;
} __packed;
+struct wmi_10_4_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+} __packed;
+
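/* Illustrative sketch, not part of this patch: rx_clear_count and
 * cycle_count are MAC clock counters, so fractional channel busyness
 * over the sampling window is roughly rx_clear_count / cycle_count.
 */
static u8 example_busy_percent(const struct wmi_10_4_chan_info_event *ev)
{
	u64 busy = (u64)__le32_to_cpu(ev->rx_clear_count) * 100;
	u32 total = __le32_to_cpu(ev->cycle_count);

	return total ? div_u64(busy, total) : 0;
}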
struct wmi_peer_sta_kickout_event {
struct wmi_mac_addr peer_macaddr;
} __packed;
@@ -4840,6 +5760,9 @@ struct wmi_ch_info_ev_arg {
__le32 noise_floor;
__le32 rx_clear_count;
__le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
};
struct wmi_vdev_start_ev_arg {
@@ -4855,7 +5778,7 @@ struct wmi_peer_kick_ev_arg {
struct wmi_swba_ev_arg {
__le32 vdev_map;
- const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+ struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index fc595b92ac56..c5f8bc4b5595 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -455,7 +455,7 @@
#define AR_PHY_MODE (AR_SM_BASE + 0x8)
#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
+#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
@@ -495,7 +495,7 @@
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
-#define AR_PHY_TEST (AR_SM_BASE + 0x160)
+#define AR_PHY_TEST (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
#define AR_PHY_TEST_BBB_OBS_SEL_S 19
@@ -521,24 +521,29 @@
#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
-#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
+#define AR_PHY_TSTDAC (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
-#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
-#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
-#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
-#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
-#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
-#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
-#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
+#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
+#define AR_PHY_HEAVYCLIP_1 (AR_SM_BASE + 0x19c)
+#define AR_PHY_HEAVYCLIP_2 (AR_SM_BASE + 0x1a0)
+#define AR_PHY_HEAVYCLIP_3 (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_4 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_5 (AR_SM_BASE + 0x1ac)
#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index dbf8f4959642..da32c8faad94 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -765,6 +765,8 @@ static int read_file_reset(struct seq_file *file, void *data)
[RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
[RESET_TYPE_MCI] = "MCI Reset",
[RESET_TYPE_CALIBRATION] = "Calibration error",
+ [RESET_TX_DMA_ERROR] = "Tx DMA stop error",
+ [RESET_RX_DMA_ERROR] = "Rx DMA stop error",
};
int i;
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index a8e9319958e6..cd68c5f0e751 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -50,6 +50,8 @@ enum ath_reset_type {
RESET_TYPE_BEACON_STUCK,
RESET_TYPE_MCI,
RESET_TYPE_CALIBRATION,
+ RESET_TX_DMA_ERROR,
+ RESET_RX_DMA_ERROR,
__RESET_TYPE_MAX
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index e98a9eaba7ff..1ece42c2443d 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -30,6 +30,157 @@ struct ath_radar_data {
u8 pulse_length_pri;
};
+/**** begin: CHIRP ************************************************************/
+
+/* min and max gradients for defined FCC chirping pulses, given by
+ * - 20MHz chirp width over a pulse width of 50us
+ * - 5MHz chirp width over a pulse width of 100us
+ */
+static const int BIN_DELTA_MIN = 1;
+static const int BIN_DELTA_MAX = 10;
+
+/* we need at least 3 deltas (i.e. 4 samples) for a reliable chirp detection */
+#define NUM_DIFFS 3
+static const int FFT_NUM_SAMPLES = (NUM_DIFFS + 1);
+
+/* Threshold for difference of delta peaks */
+static const int MAX_DIFF = 2;
+
+/* width range to be checked for chirping */
+static const int MIN_CHIRP_PULSE_WIDTH = 20;
+static const int MAX_CHIRP_PULSE_WIDTH = 110;
+
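/* Worked numbers, not part of this patch: the two FCC pulse specs above
 * give frequency gradients of 20 MHz / 50 us = 0.4 MHz/us and
 * 5 MHz / 100 us = 0.05 MHz/us; BIN_DELTA_MIN/MAX bracket these once
 * converted to FFT bins per sample (the conversion factor depends on
 * bin width and sample spacing, which are hardware specific).
 */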
+struct ath9k_dfs_fft_20 {
+ u8 bin[28];
+ u8 lower_bins[3];
+} __packed;
+struct ath9k_dfs_fft_40 {
+ u8 bin[64];
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+} __packed;
+
+static inline int fft_max_index(u8 *bins)
+{
+ return (bins[2] & 0xfc) >> 2;
+}
+static inline int fft_max_magnitude(u8 *bins)
+{
+ return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10;
+}
+static inline u8 fft_bitmap_weight(u8 *bins)
+{
+ return bins[0] & 0x3f;
+}
+
+static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
+ bool is_ctl, bool is_ext)
+{
+ const int DFS_UPPER_BIN_OFFSET = 64;
+ /* if detected radar on both channels, select the significant one */
+ if (is_ctl && is_ext) {
+ /* first check whether channels have 'strong' bins */
+ is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
+ is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
+
+ /* if still unclear, take higher magnitude */
+ if (is_ctl && is_ext) {
+ int mag_lower = fft_max_magnitude(fft->lower_bins);
+ int mag_upper = fft_max_magnitude(fft->upper_bins);
+ if (mag_upper > mag_lower)
+ is_ctl = false;
+ else
+ is_ext = false;
+ }
+ }
+ if (is_ctl)
+ return fft_max_index(fft->lower_bins);
+ return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET;
+}
+static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
+ int datalen, bool is_ctl, bool is_ext)
+{
+ int i;
+ int max_bin[FFT_NUM_SAMPLES];
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int prev_delta;
+
+ if (IS_CHAN_HT40(ah->curchan)) {
+ struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data;
+ int num_fft_packets = datalen / sizeof(*fft);
+ if (num_fft_packets == 0)
+ return false;
+
+ ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
+ datalen, num_fft_packets);
+ if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+ ath_dbg(common, DFS, "not enough packets for chirp\n");
+ return false;
+ }
+ /* HW sometimes adds 2 garbage bytes in front of FFT samples */
+ if ((datalen % sizeof(*fft)) == 2) {
+ fft = (struct ath9k_dfs_fft_40 *) (data + 2);
+ ath_dbg(common, DFS, "fixing datalen by 2\n");
+ }
+ if (IS_CHAN_HT40MINUS(ah->curchan)) {
+ int temp = is_ctl;
+ is_ctl = is_ext;
+ is_ext = temp;
+ }
+ for (i = 0; i < FFT_NUM_SAMPLES; i++)
+ max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
+ is_ext);
+ } else {
+ struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data;
+ int num_fft_packets = datalen / sizeof(*fft);
+ if (num_fft_packets == 0)
+ return false;
+ ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
+ datalen, num_fft_packets);
+ if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+ ath_dbg(common, DFS, "not enough packets for chirp\n");
+ return false;
+ }
+ /* in ht20, this is a 6-bit signed number => shift it to 0 */
+ for (i = 0; i < FFT_NUM_SAMPLES; i++)
+ max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20;
+ }
+ ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n",
+ max_bin[0], max_bin[1], max_bin[2], max_bin[3]);
+
+ /* Check for chirp attributes within specs
+ * a) delta of adjacent max_bins is within range
+ * b) difference of adjacent deltas is within tolerance
+ */
+ prev_delta = 0;
+ for (i = 0; i < NUM_DIFFS; i++) {
+ int ddelta = -1;
+ int delta = max_bin[i + 1] - max_bin[i];
+
+ /* ensure gradient is within valid range */
+ if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) {
+ ath_dbg(common, DFS,
+ "CHIRP: invalid delta %d in sample %d\n", delta, i);
+ return false;
+ }
+ if (i == 0)
+ goto done;
+ ddelta = delta - prev_delta;
+ if (abs(ddelta) > MAX_DIFF) {
+ ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n",
+ ddelta);
+ return false;
+ }
+done:
+ ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n",
+ i, delta, ddelta);
+ prev_delta = delta;
+ }
+ return true;
+}
+/**** end: CHIRP **************************************************************/
+
/* convert pulse duration to usecs, considering clock mode */
static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
{
@@ -113,12 +264,6 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
return false;
}
- /*
- * TODO: check chirping pulses
- * checks for chirping are dependent on the DFS regulatory domain
- * used, which is yet TBD
- */
-
/* convert duration to usecs */
pe->width = dur_to_usecs(sc->sc_ah, dur);
pe->rssi = rssi;
@@ -190,6 +335,16 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
return;
+ if (pe.width > MIN_CHIRP_PULSE_WIDTH &&
+ pe.width < MAX_CHIRP_PULSE_WIDTH) {
+ bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND);
+ bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND);
+ int clen = datalen - 3;
+ pe.chirp = ath9k_check_chirping(sc, data, clen, is_ctl, is_ext);
+ } else {
+ pe.chirp = false;
+ }
+
ath_dbg(common, DFS,
"ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
"width=%d, rssi=%d, delta_ts=%llu\n",
@@ -198,7 +353,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
sc->dfs_prev_pulse_ts = pe.ts;
if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
ath9k_dfs_process_radar_pulse(sc, &pe);
- if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+ if (IS_CHAN_HT40(ah->curchan) &&
+ ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
ath9k_dfs_process_radar_pulse(sc, &pe);
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6c75fb1ab77d..d3189daf9996 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -491,10 +491,9 @@ bool ath_stoprecv(struct ath_softc *sc)
if (!(ah->ah_flags & AH_UNPLUGGED) &&
unlikely(!stopped)) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Could not stop RX, we could be "
- "confusing the DMA engine when we start RX up\n");
- ATH_DBG_WARN_ON_ONCE(!stopped);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "Failed to stop Rx DMA\n");
+ RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
}
return stopped && !reset;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3ad79bb4f2c2..b766a7fc60aa 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1883,8 +1883,11 @@ bool ath_drain_all_txq(struct ath_softc *sc)
npend |= BIT(i);
}
- if (npend)
- ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+ if (npend) {
+ RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
+ ath_dbg(common, RESET,
+ "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+ }
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@@ -2470,8 +2473,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bf = list_first_entry(&bf_q, struct ath_buf, list);
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
- if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
- hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
sizeof(*hdr), DMA_TO_DEVICE);
}
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 1b5ad1965607..cc5c592fc4c0 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -273,7 +273,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
tmp_false_count++;
}
}
- if (ps.count < min_count)
+ if (ps.count <= min_count)
/* did not reach minimum count, drop sequence */
continue;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c79cfe02ec80..e4be2d9bbac4 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -736,6 +736,92 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
return rc;
}
+/* internal functions for device reset and starting AP */
+static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
+ size_t probe_ies_len, const u8 *probe_ies,
+ size_t assoc_ies_len, const u8 *assoc_ies)
+
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* FW does not form a regular beacon, so beacon IEs are not set.
+ * For the DMG beacon, once it is supported, beacon IEs will
+ * be reused; add something like:
+ * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ * bcon->beacon_ies);
+ */
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies);
+ if (rc) {
+ wil_err(wil, "set_ie(PROBE_RESP) failed\n");
+ return rc;
+ }
+
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies);
+ if (rc) {
+ wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *ssid, size_t ssid_len, u32 privacy,
+ int bi, u8 chan,
+ size_t probe_ies_len, const u8 *probe_ies,
+ size_t assoc_ies_len, const u8 *assoc_ies,
+ u8 hidden_ssid)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+
+ mutex_lock(&wil->mutex);
+
+ __wil_down(wil);
+ rc = __wil_up(wil);
+ if (rc)
+ goto out;
+
+ rc = wmi_set_ssid(wil, ssid_len, ssid);
+ if (rc)
+ goto out;
+
+ rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies,
+ assoc_ies_len, assoc_ies);
+ if (rc)
+ goto out;
+
+ wil->privacy = privacy;
+ wil->channel = chan;
+ wil->hidden_ssid = hidden_ssid;
+
+ netif_carrier_on(ndev);
+
+ rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+ if (rc)
+ goto err_pcp_start;
+
+ rc = wil_bcast_init(wil);
+ if (rc)
+ goto err_bcast;
+
+ goto out; /* success */
+
+err_bcast:
+ wmi_pcp_stop(wil);
+err_pcp_start:
+ netif_carrier_off(ndev);
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_beacon_data *bcon)
@@ -746,6 +832,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
int rc;
+ u32 privacy = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
@@ -760,40 +847,41 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
}
- /* FW do not form regular beacon, so bcon IE's are not set
- * For the DMG bcon, when it will be supported, bcon IE's will
- * be reused; add something like:
- * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
- * bcon->beacon_ies);
- */
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
- if (rc) {
- wil_err(wil, "set_ie(PROBE_RESP) failed\n");
- return rc;
- }
+ if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len))
+ privacy = 1;
- rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
- bcon->assocresp_ies_len,
- bcon->assocresp_ies);
- if (rc) {
- wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
- return rc;
+ /* in case privacy has changed, need to restart the AP */
+ if (wil->privacy != privacy) {
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+ wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
+ wil->privacy, privacy);
+
+ rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
+ wdev->ssid_len, privacy,
+ wdev->beacon_interval,
+ wil->channel, pr_ies_len, pr_ies,
+ bcon->assocresp_ies_len,
+ bcon->assocresp_ies,
+ wil->hidden_ssid);
+ } else {
+ rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies,
+ bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
}
- return 0;
+ return rc;
}
static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_ap_settings *info)
{
- int rc = 0;
+ int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
- u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
@@ -807,6 +895,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
+ switch (info->hidden_ssid) {
+ case NL80211_HIDDEN_SSID_NOT_IN_USE:
+ hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
+ break;
+
+ case NL80211_HIDDEN_SSID_ZERO_LEN:
+ hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
+ break;
+
+ case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+ hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
+ break;
+
+ default:
+ wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
+ return -EOPNOTSUPP;
+ }
wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
channel->center_freq, info->privacy ? "secure" : "open");
wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
@@ -830,70 +935,14 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
}
- wil_set_recovery_state(wil, fw_recovery_idle);
-
- mutex_lock(&wil->mutex);
-
- __wil_down(wil);
- rc = __wil_up(wil);
- if (rc)
- goto out;
-
- rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
- if (rc)
- goto out;
-
- /* IE's */
- /* bcon 'head IE's are not relevant for 60g band */
- /*
- * FW do not form regular beacon, so bcon IE's are not set
- * For the DMG bcon, when it will be supported, bcon IE's will
- * be reused; add something like:
- * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
- * bcon->beacon_ies);
- */
- wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
- wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
- bcon->assocresp_ies);
-
- wil->privacy = info->privacy;
-
- switch (info->hidden_ssid) {
- case NL80211_HIDDEN_SSID_NOT_IN_USE:
- hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
- break;
-
- case NL80211_HIDDEN_SSID_ZERO_LEN:
- hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
- break;
-
- case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
- hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
- break;
-
- default:
- rc = -EOPNOTSUPP;
- goto out;
- }
-
- netif_carrier_on(ndev);
-
- rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
- channel->hw_value, hidden_ssid);
- if (rc)
- goto err_pcp_start;
+ rc = _wil_cfg80211_start_ap(wiphy, ndev,
+ info->ssid, info->ssid_len, info->privacy,
+ info->beacon_interval, channel->hw_value,
+ pr_ies_len, pr_ies,
+ bcon->assocresp_ies_len,
+ bcon->assocresp_ies,
+ hidden_ssid);
- rc = wil_bcast_init(wil);
- if (rc)
- goto err_bcast;
-
- goto out; /* success */
-err_bcast:
- wmi_pcp_stop(wil);
-err_pcp_start:
- netif_carrier_off(ndev);
-out:
- mutex_unlock(&wil->mutex);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 275355d46a36..c63e4a35eaa0 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -559,6 +559,8 @@ struct wil6210_priv {
/* profile */
u32 monitor_flags;
u32 privacy; /* secure connection? */
+ u8 hidden_ssid; /* relevant in AP mode */
+ u16 channel; /* relevant in AP mode */
int sinfo_gen;
u32 ap_isolate; /* no intra-BSS communication */
/* interrupt moderation */
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 916123a3d74e..a335f94c72ff 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -929,8 +929,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
b43_lo_write(dev, &cal->ctl);
}
-/* Periodic LO maintanance work */
-void b43_lo_g_maintanance_work(struct b43_wldev *dev)
+/* Periodic LO maintenance work */
+void b43_lo_g_maintenance_work(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_g *gphy = phy->g;
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 3b27e20eff80..7b4df3883bc2 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -80,7 +80,7 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
-void b43_lo_g_maintanance_work(struct b43_wldev *dev);
+void b43_lo_g_maintenance_work(struct b43_wldev *dev);
void b43_lo_g_cleanup(struct b43_wldev *dev);
void b43_lo_g_init(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 727ce6edb4b3..462310e6e88f 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -3004,7 +3004,7 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
phy->rev == 1) {
//TODO: implement rev1 workaround
}
- b43_lo_g_maintanance_work(dev);
+ b43_lo_g_maintenance_work(dev);
b43_mac_enable(dev);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index d86d1f1f1c91..ffe526070d6f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -5785,6 +5785,7 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct ieee80211_supported_band *band;
__le32 bandlist[3];
u32 n_bands;
@@ -5798,6 +5799,19 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
if (err)
return err;
+ for (i = 0; i < wiphy->iface_combinations->max_interfaces &&
+ i < ARRAY_SIZE(drvr->addresses); i++) {
+ u8 *addr = drvr->addresses[i].addr;
+
+ memcpy(addr, drvr->mac, ETH_ALEN);
+ if (i) {
+ addr[0] |= BIT(1);
+ addr[ETH_ALEN - 1] ^= i;
+ }
+ }
+ wiphy->addresses = drvr->addresses;
+ wiphy->n_addresses = i;
+
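/* Illustrative example, not part of this patch: with a base MAC of
 * 00:11:22:33:44:55, the loop above yields 00:11:22:33:44:55 for
 * interface 0 and 02:11:22:33:44:54 for interface 1 - the locally
 * administered bit is set and the last octet is XORed with the index,
 * keeping all derived addresses unique and distinct from the base.
 */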
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->cipher_suites = __wl_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index fd74a9c6e9ac..746304121cdb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -21,6 +21,7 @@
#ifndef BRCMFMAC_CORE_H
#define BRCMFMAC_CORE_H
+#include <net/cfg80211.h>
#include "fweh.h"
#define TOE_TX_CSUM_OL 0x00000001
@@ -118,6 +119,8 @@ struct brcmf_pub {
/* Multicast data packets sent to dongle */
unsigned long tx_multicast;
+ struct mac_address addresses[BRCMF_MAX_IFS];
+
struct brcmf_if *iflist[BRCMF_MAX_IFS];
struct mutex proto_block;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index ab775a5d5b33..d2c5747e3ac9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1472,9 +1472,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
wl->timers = t;
#ifdef DEBUG
- t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
- if (t->name)
- strcpy(t->name, name);
+ t->name = kstrdup(name, GFP_ATOMIC);
#endif
return t;
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 7603546d2de3..29185aeccba8 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -467,7 +467,6 @@ static struct spi_driver spi_driver = {
.remove = cw1200_spi_disconnect,
.driver = {
.name = "cw1200_wlan_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &cw1200_pm_ops,
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 08eb229e7816..36818c7f30b9 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1410,7 +1410,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
{
-#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
+#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50))
struct host_command cmd = {
.host_command = CARD_DISABLE_PHY_OFF,
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 7f4cb692cc57..af1b3e6839fa 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3259,7 +3259,7 @@ il3945_show_measurement(struct device *d, struct device_attribute *attr,
while (size && PAGE_SIZE - len) {
hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
+ PAGE_SIZE - len, true);
len = strlen(buf);
if (PAGE_SIZE - len)
buf[len++] = '\n';
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index 344010153196..908b9f4fef6f 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -515,12 +515,8 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
eeprom_ver);
for (ofs = 0; ofs < eeprom_len; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+ ofs, ptr + ofs);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index b15e4c7acbec..ff63cb5632eb 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -19,6 +19,7 @@
#include "cfg80211.h"
#include "main.h"
+#include "11n.h"
static char *reg_alpha2;
module_param(reg_alpha2, charp, 0);
@@ -34,12 +35,38 @@ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
},
};
-static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta = {
.limits = mwifiex_ap_sta_limits,
.num_different_channels = 1,
.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
.max_interfaces = MWIFIEX_MAX_BSS_NUM,
.beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_vht = {
+ .limits = mwifiex_ap_sta_limits,
+ .num_different_channels = 1,
+ .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+ .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_drcs = {
+ .limits = mwifiex_ap_sta_limits,
+ .num_different_channels = 2,
+ .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+ .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+ .beacon_int_infra_match = true,
};
/*
@@ -441,7 +468,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
* - Country codes
* - Sub bands (first channel, number of channels, maximum Tx power)
*/
-static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
{
u8 no_of_triplet = 0;
struct ieee80211_country_ie_triplet *t;
@@ -804,10 +831,13 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
priv->bss_type = MWIFIEX_BSS_TYPE_STA;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
+ case NL80211_IFTYPE_P2P_GO:
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+ break;
case NL80211_IFTYPE_AP:
priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
@@ -1115,8 +1145,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
switch (type) {
case NL80211_IFTYPE_STATION:
- if (mwifiex_cfg80211_init_p2p_client(priv))
+ if (mwifiex_cfg80211_deinit_p2p(priv))
return -EFAULT;
+ priv->adapter->curr_iface_comb.p2p_intf--;
+ priv->adapter->curr_iface_comb.sta_intf++;
dev->ieee80211_ptr->iftype = type;
break;
case NL80211_IFTYPE_ADHOC:
@@ -2788,6 +2820,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
struct mwifiex_adapter *adapter = priv->adapter;
+ struct sk_buff *skb, *tmp;
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_remove(priv);
@@ -2795,6 +2828,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
@@ -2954,7 +2990,6 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
MWIFIEX_MEF_MAX_BYTESEQ)) {
mwifiex_dbg(priv->adapter, ERROR,
"Pattern not supported\n");
- kfree(mef_entry);
return -EOPNOTSUPP;
}
@@ -3036,9 +3071,12 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
- if (wowlan->n_patterns || wowlan->magic_pkt)
+ if (wowlan->n_patterns || wowlan->magic_pkt) {
ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
&mef_entry[1], wowlan);
+ if (ret)
+ goto err;
+ }
if (!mef_cfg.criteria)
mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -3048,6 +3086,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
HostCmd_ACT_GEN_SET, 0,
&mef_cfg, true);
+
+err:
kfree(mef_entry);
return ret;
}
@@ -3360,6 +3400,72 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
}
static int
+mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ unsigned long flags;
+ u16 chan;
+ u8 second_chan_offset, band;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, addr);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (!sta_ptr) {
+ wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+ __func__, addr);
+ return -ENOENT;
+ }
+
+ if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
+ WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+ wiphy_err(wiphy, "%pM do not support tdls cs\n", addr);
+ return -ENOENT;
+ }
+
+ if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+ sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+ wiphy_err(wiphy, "channel switch is running, abort request\n");
+ return -EALREADY;
+ }
+
+ chan = chandef->chan->hw_value;
+ second_chan_offset = mwifiex_get_sec_chan_offset(chan);
+ band = chandef->chan->band;
+ mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band);
+
+ return 0;
+}
+
+static void
+mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ unsigned long flags;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, addr);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (!sta_ptr) {
+ wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+ __func__, addr);
+ } else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+ sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
+ sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+ wiphy_err(wiphy, "tdls chan switch not initialize by %pM\n",
+ addr);
+ } else
+ mwifiex_stop_tdls_cs(priv, addr);
+}
+
+static int
mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
@@ -3575,6 +3681,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_coalesce = mwifiex_cfg80211_set_coalesce,
.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
.tdls_oper = mwifiex_cfg80211_tdls_oper,
+ .tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch,
+ .tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
.add_station = mwifiex_cfg80211_add_station,
.change_station = mwifiex_cfg80211_change_station,
.get_channel = mwifiex_cfg80211_get_channel,
@@ -3672,7 +3780,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
else
wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
- wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+ if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+ else if (adapter->is_hw_11ac_capable)
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht;
+ else
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
wiphy->n_iface_combinations = 1;
/* Initialize cipher suits */
@@ -3709,6 +3822,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+ wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+
if (adapter->fw_api_ver == MWIFIEX_FW_V15)
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 51e344789ba2..098e1f14dc9a 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -141,6 +141,9 @@ enum mwifiex_tdls_status {
TDLS_SETUP_COMPLETE,
TDLS_SETUP_FAILURE,
TDLS_LINK_TEARDOWN,
+ TDLS_CHAN_SWITCHING,
+ TDLS_IN_BASE_CHAN,
+ TDLS_IN_OFF_CHAN,
};
enum mwifiex_tdls_error_code {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index cd09051710e6..cff38ad129aa 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,14 +169,17 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123)
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183)
#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
#define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197)
#define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define TLV_TYPE_CHANNEL_STATS (PROPRIETARY_TLV_BASE_ID + 198)
#define TLV_BTCOEX_WL_AGGR_WINSIZE (PROPRIETARY_TLV_BASE_ID + 202)
#define TLV_BTCOEX_WL_SCANTIME (PROPRIETARY_TLV_BASE_ID + 203)
+#define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -200,6 +203,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -359,6 +363,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
+#define HostCmd_CMD_TDLS_CONFIG 0x0100
+#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
@@ -509,8 +515,10 @@ enum P2P_MODES {
#define EVENT_TDLS_GENERIC_EVENT 0x00000052
#define EVENT_RADAR_DETECTED 0x00000053
#define EVENT_CHANNEL_REPORT_RDY 0x00000054
+#define EVENT_TX_DATA_PAUSE 0x00000055
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
+#define EVENT_MULTI_CHAN_INFO 0x0000006a
#define EVENT_TX_STATUS_REPORT 0x00000074
#define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076
@@ -545,7 +553,27 @@ enum P2P_MODES {
#define ACT_TDLS_DELETE 0x00
#define ACT_TDLS_CREATE 0x01
#define ACT_TDLS_CONFIG 0x02
-#define TDLS_EVENT_LINK_TEAR_DOWN 3
+
+#define TDLS_EVENT_LINK_TEAR_DOWN 3
+#define TDLS_EVENT_CHAN_SWITCH_RESULT 7
+#define TDLS_EVENT_START_CHAN_SWITCH 8
+#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9
+
+#define TDLS_BASE_CHANNEL 0
+#define TDLS_OFF_CHANNEL 1
+
+#define ACT_TDLS_CS_ENABLE_CONFIG 0x00
+#define ACT_TDLS_CS_INIT 0x06
+#define ACT_TDLS_CS_STOP 0x07
+#define ACT_TDLS_CS_PARAMS 0x08
+
+#define MWIFIEX_DEF_CS_UNIT_TIME 2
+#define MWIFIEX_DEF_CS_THR_OTHERLINK 10
+#define MWIFIEX_DEF_THR_DIRECTLINK 0
+#define MWIFIEX_DEF_CS_TIME 10
+#define MWIFIEX_DEF_CS_TIMEOUT 16
+#define MWIFIEX_DEF_CS_REG_CLASS 12
+#define MWIFIEX_DEF_CS_PERIODICITY 1
#define MWIFIEX_FW_V15 15
@@ -1131,6 +1159,13 @@ struct host_cmd_ds_tx_rate_query {
u8 ht_info;
} __packed;
+struct mwifiex_tx_pause_tlv {
+ struct mwifiex_ie_types_header header;
+ u8 peermac[ETH_ALEN];
+ u8 tx_pause;
+ u8 pkt_cnt;
+} __packed;
+
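/* Illustrative sketch, not part of this patch: reacting to a TX-pause
 * TLV (hypothetical handler; field semantics as described here are
 * assumptions, and the driver's real logic also touches per-RA lists).
 */
static void example_handle_tx_pause(const struct mwifiex_tx_pause_tlv *tp)
{
	if (tp->tx_pause) {
		/* firmware asked us to stop queuing data for tp->peermac */
	} else {
		/* resume transmissions toward tp->peermac */
	}
}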
enum Host_Sleep_Action {
HS_CONFIGURE = 0x0001,
HS_ACTIVATE = 0x0002,
@@ -1249,6 +1284,36 @@ struct host_cmd_ds_tdls_oper {
u8 peer_mac[ETH_ALEN];
} __packed;
+struct mwifiex_tdls_config {
+ __le16 enable;
+};
+
+struct mwifiex_tdls_config_cs_params {
+ u8 unit_time;
+ u8 thr_otherlink;
+ u8 thr_directlink;
+};
+
+struct mwifiex_tdls_init_cs_params {
+ u8 peer_mac[ETH_ALEN];
+ u8 primary_chan;
+ u8 second_chan_offset;
+ u8 band;
+ __le16 switch_time;
+ __le16 switch_timeout;
+ u8 reg_class;
+ u8 periodicity;
+} __packed;
+
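/* Illustrative sketch, not part of this patch: how the MWIFIEX_DEF_CS_*
 * defaults above might populate a channel-switch request. Peer, channel
 * and the units/scaling of the defaults are placeholder assumptions.
 */
static void example_fill_cs_params(struct mwifiex_tdls_init_cs_params *p,
				   const u8 *peer, u8 chan)
{
	memcpy(p->peer_mac, peer, ETH_ALEN);
	p->primary_chan = chan;
	p->switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME);
	p->switch_timeout = cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT);
	p->reg_class = MWIFIEX_DEF_CS_REG_CLASS;
	p->periodicity = MWIFIEX_DEF_CS_PERIODICITY;
}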
+struct mwifiex_tdls_stop_cs_params {
+ u8 peer_mac[ETH_ALEN];
+};
+
+struct host_cmd_ds_tdls_config {
+ __le16 tdls_action;
+ u8 tdls_data[1];
+} __packed;
+
struct mwifiex_chan_desc {
__le16 start_freq;
u8 chan_width;
@@ -1370,6 +1435,11 @@ struct host_cmd_ds_802_11_scan_ext {
u8 tlv_buffer[1];
} __packed;
+struct mwifiex_ie_types_bss_mode {
+ struct mwifiex_ie_types_header header;
+ u8 bss_mode;
+} __packed;
+
struct mwifiex_ie_types_bss_scan_rsp {
struct mwifiex_ie_types_header header;
u8 bssid[ETH_ALEN];
@@ -1908,6 +1978,12 @@ struct mwifiex_radar_det_event {
__le32 passed;
} __packed;
+struct mwifiex_ie_types_multi_chan_info {
+ struct mwifiex_ie_types_header header;
+ __le16 status;
+ u8 tlv_buffer[0];
+} __packed;
+
struct meas_rpt_map {
u8 rssi:3;
u8 unmeasured:1;
@@ -1927,10 +2003,18 @@ struct host_cmd_ds_802_11_subsc_evt {
__le16 events;
} __packed;
+struct chan_switch_result {
+ u8 cur_chan;
+ u8 status;
+ u8 reason;
+} __packed;
+
struct mwifiex_tdls_generic_event {
__le16 type;
u8 peer_mac[ETH_ALEN];
union {
+ struct chan_switch_result switch_result;
+ u8 cs_stop_reason;
__le16 reason_code;
__le16 reserved;
} u;
@@ -1971,6 +2055,11 @@ struct host_cmd_ds_coalesce_cfg {
struct coalesce_receive_filt_rule rule[0];
} __packed;
+struct host_cmd_ds_multi_chan_policy {
+ __le16 action;
+ __le16 policy;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2035,9 +2124,11 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_list sta_list;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+ struct host_cmd_ds_tdls_config tdls_config;
struct host_cmd_ds_tdls_oper tdls_oper;
struct host_cmd_ds_chan_rpt_req chan_rpt_req;
struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
+ struct host_cmd_ds_multi_chan_policy mc_policy;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 0ba894509413..abf52d25b981 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -409,6 +409,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
int ret;
ret = mwifiex_uap_parse_tail_ies(priv, info);
+
+ if (ret)
return ret;
return mwifiex_set_mgmt_beacon_data_ies(priv, info);
@@ -477,6 +479,7 @@ int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
ar_ie, &priv->assocresp_idx);
done:
+ kfree(gen_ie);
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index df7fdc09d38c..8fa363add970 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -77,7 +77,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->media_connected = false;
eth_broadcast_addr(priv->curr_addr);
-
+ priv->port_open = false;
priv->pkt_tx_ctrl = 0;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->data_rate = 0; /* Initially indicate the rate as auto */
@@ -499,6 +499,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->sta_list);
INIT_LIST_HEAD(&priv->auto_tdls_list);
skb_queue_head_init(&priv->tdls_txq);
+ skb_queue_head_init(&priv->bypass_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 56b024a6aaa5..3cda1f956f0b 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -783,6 +783,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->scan_block = true;
+ else
+ priv->port_open = true;
done:
/* Need to indicate IOCTL complete */
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3ba4e0e04223..278dc94eaecb 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -276,6 +276,7 @@ process_start:
!adapter->pm_wakeup_fw_try) &&
(is_command_pending(adapter) ||
!skb_queue_empty(&adapter->tx_data_q) ||
+ !mwifiex_bypass_txlist_empty(adapter) ||
!mwifiex_wmm_lists_empty(adapter))) {
adapter->pm_wakeup_fw_try = true;
mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -299,9 +300,16 @@ process_start:
if ((!adapter->scan_chan_gap_enabled &&
adapter->scan_processing) || adapter->data_sent ||
+ mwifiex_is_tdls_chan_switching
+ (mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_STA)) ||
(mwifiex_wmm_lists_empty(adapter) &&
+ mwifiex_bypass_txlist_empty(adapter) &&
skb_queue_empty(&adapter->tx_data_q))) {
if (adapter->cmd_sent || adapter->curr_cmd ||
+ !mwifiex_is_send_cmd_allowed
+ (mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_STA)) ||
(!is_command_pending(adapter)))
break;
}
@@ -342,7 +350,9 @@ process_start:
continue;
}
- if (!adapter->cmd_sent && !adapter->curr_cmd) {
+ if (!adapter->cmd_sent && !adapter->curr_cmd &&
+ mwifiex_is_send_cmd_allowed
+ (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
if (mwifiex_exec_next_cmd(adapter) == -1) {
ret = -1;
break;
@@ -365,7 +375,25 @@ process_start:
if ((adapter->scan_chan_gap_enabled ||
!adapter->scan_processing) &&
- !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
+ !adapter->data_sent &&
+ !mwifiex_bypass_txlist_empty(adapter) &&
+ !mwifiex_is_tdls_chan_switching
+ (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+ mwifiex_process_bypass_tx(adapter);
+ if (adapter->hs_activated) {
+ adapter->is_hs_configured = false;
+ mwifiex_hs_activated_event
+ (mwifiex_get_priv
+ (adapter, MWIFIEX_BSS_ROLE_ANY),
+ false);
+ }
+ }
+
+ if ((adapter->scan_chan_gap_enabled ||
+ !adapter->scan_processing) &&
+ !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
+ !mwifiex_is_tdls_chan_switching
+ (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
adapter->is_hs_configured = false;
@@ -379,6 +407,7 @@ process_start:
if (adapter->delay_null_pkt && !adapter->cmd_sent &&
!adapter->curr_cmd && !is_command_pending(adapter) &&
(mwifiex_wmm_lists_empty(adapter) &&
+ mwifiex_bypass_txlist_empty(adapter) &&
skb_queue_empty(&adapter->tx_data_q))) {
if (!mwifiex_send_null_packet
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -649,6 +678,26 @@ mwifiex_close(struct net_device *dev)
return 0;
}
+static bool
+mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+
+ if (ntohs(eth_hdr->h_proto) == ETH_P_PAE ||
+ mwifiex_is_skb_mgmt_frame(skb) ||
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) {
+ mwifiex_dbg(priv->adapter, DATA,
+ "bypass txqueue; eth type %#x, mgmt %d\n",
+ ntohs(eth_hdr->h_proto),
+ mwifiex_is_skb_mgmt_frame(skb));
+ return true;
+ }
+
+ return false;
+}
/*
* Add buffer into wmm tx queue and queue work to transmit it.
*/
@@ -666,8 +715,14 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
}
}
- atomic_inc(&priv->adapter->tx_pending);
- mwifiex_wmm_add_buf_txqueue(priv, skb);
+ if (mwifiex_bypass_tx_queue(priv, skb)) {
+ atomic_inc(&priv->adapter->tx_pending);
+ atomic_inc(&priv->adapter->bypass_tx_pending);
+ mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
+ } else {
+ atomic_inc(&priv->adapter->tx_pending);
+ mwifiex_wmm_add_buf_txqueue(priv, skb);
+ }
mwifiex_queue_main_work(priv->adapter);
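
[Editor's note: the bypass queue exists so that EAPOL handshake frames, management frames, and TDLS setup frames can still be transmitted while normal data is paused or the port is closed. Below is a minimal user-space sketch of the same classification; ETH_P_PAE (0x888E) and ETH_P_TDLS (0x890D) are the standard ethertypes, and is_mgmt_frame() is a stub standing in for mwifiex_is_skb_mgmt_frame().]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohs */

#define ETH_P_PAE  0x888E	/* 802.1X EAPOL */
#define ETH_P_TDLS 0x890D	/* 802.11z TDLS setup frames */

/* Stub standing in for mwifiex_is_skb_mgmt_frame(); an assumption here. */
static bool is_mgmt_frame(const uint8_t *frame) { (void)frame; return false; }

/* True when the frame should skip the WMM queues and use the bypass
 * queue, mirroring the mwifiex_bypass_tx_queue() test above. */
static bool should_bypass(const uint8_t *frame, bool sta_role, bool tdls_fw_cap)
{
	uint16_t proto;

	memcpy(&proto, frame + 12, sizeof(proto));	/* ethertype field */
	proto = ntohs(proto);

	return proto == ETH_P_PAE ||
	       is_mgmt_frame(frame) ||
	       (sta_role && tdls_fw_cap && proto == ETH_P_TDLS);
}

int main(void)
{
	uint8_t eapol[14] = { [12] = 0x88, [13] = 0x8e };

	printf("EAPOL bypass: %d\n", should_bypass(eapol, true, true));
	return 0;
}
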
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index ae98b5b83b1f..face7478937f 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -281,6 +281,7 @@ struct mwifiex_ra_list_tbl {
u8 amsdu_in_ampdu;
u16 total_pkt_count;
bool tdls_link;
+ bool tx_paused;
};
struct mwifiex_tid_tbl {
@@ -294,6 +295,7 @@ struct mwifiex_tid_tbl {
struct mwifiex_wmm_desc {
struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
u32 packets_out[MAX_NUM_TID];
+ u32 pkts_paused[MAX_NUM_TID];
/* spin lock to protect ra_list */
spinlock_t ra_list_spinlock;
struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
@@ -517,6 +519,7 @@ struct mwifiex_private {
u8 frame_type;
u8 curr_addr[ETH_ALEN];
u8 media_connected;
+ u8 port_open;
u32 num_tx_timeout;
/* track consecutive timeout */
u8 tx_timeout_cnt;
@@ -662,6 +665,7 @@ struct mwifiex_private {
struct cfg80211_beacon_data beacon_after;
struct mwifiex_11h_intf_state state_11h;
struct mwifiex_ds_mem_rw mem_rw;
+ struct sk_buff_head bypass_txq;
};
@@ -768,6 +772,7 @@ struct mwifiex_sta_node {
u8 tdls_status;
struct mwifiex_tdls_capab tdls_cap;
struct mwifiex_station_stats stats;
+ u8 tx_pause;
};
struct mwifiex_auto_tdls_peer {
@@ -831,6 +836,7 @@ struct mwifiex_adapter {
wait_queue_head_t init_wait_q;
void *card;
struct mwifiex_if_ops if_ops;
+ atomic_t bypass_tx_pending;
atomic_t rx_pending;
atomic_t tx_pending;
atomic_t cmd_pending;
@@ -979,6 +985,7 @@ struct mwifiex_adapter {
u8 coex_win_size;
u8 coex_tx_win_size;
u8 coex_rx_win_size;
+ bool drcs_enabled;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1330,6 +1337,21 @@ static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv)
return 0;
}
+static inline u8 mwifiex_is_tdls_link_setup(u8 status)
+{
+ switch (status) {
+ case TDLS_SETUP_COMPLETE:
+ case TDLS_CHAN_SWITCHING:
+ case TDLS_IN_BASE_CHAN:
+ case TDLS_IN_OFF_CHAN:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1458,6 +1480,9 @@ struct mwifiex_sta_node *
mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
struct mwifiex_sta_node *
mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
@@ -1488,6 +1513,13 @@ void mwifiex_check_auto_tdls(unsigned long context);
void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv);
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac);
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+ u8 primary_chan, u8 second_chan_offset, u8 band);
+
int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
void *data_buf);
@@ -1522,6 +1554,12 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+ struct sk_buff *event);
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index baf9715ddc10..ef8da8ebcbab 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -823,6 +823,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
int i;
u8 ssid_filter;
struct mwifiex_ie_types_htcap *ht_cap;
+ struct mwifiex_ie_types_bss_mode *bss_mode;
/* The tlv_buf_len is calculated for each scan command. The TLVs added
in this routine will be preserved since the routine that sends the
@@ -908,6 +909,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
wildcard_ssid_tlv->max_ssid_length =
IEEE80211_MAX_SSID_LEN;
+ if (!memcmp(user_scan_in->ssid_list[i].ssid,
+ "DIRECT-", 7))
+ wildcard_ssid_tlv->max_ssid_length = 0xfe;
+
memcpy(wildcard_ssid_tlv->ssid,
user_scan_in->ssid_list[i].ssid, ssid_len);
@@ -968,6 +973,15 @@ mwifiex_config_scan(struct mwifiex_private *priv,
else
*max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
+ if (adapter->ext_scan) {
+ bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
+ bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE);
+ bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode));
+ bss_mode->bss_mode = scan_cfg_out->bss_mode;
+ tlv_pos += sizeof(bss_mode->header) +
+ le16_to_cpu(bss_mode->header.len);
+ }
+
/* If the input config or adapter has the number of Probes set,
add tlv */
if (num_probes) {
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 037adcd1f484..a49a80dd773e 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -26,6 +26,10 @@
#include "11n.h"
#include "11ac.h"
+static bool drcs;
+module_param(drcs, bool, 0644);
+MODULE_PARM_DESC(drcs, "multi-channel operation:1, single-channel operation:0");
+
static bool disable_auto_ds;
module_param(disable_auto_ds, bool, 0);
MODULE_PARM_DESC(disable_auto_ds,
@@ -1512,6 +1516,22 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
}
static int
+mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy;
+ const u16 *drcs_info = data_buf;
+
+ mc_pol->action = cpu_to_le16(cmd_action);
+ mc_pol->policy = cpu_to_le16(*drcs_info);
+ cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY);
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) +
+ S_DS_GEN);
+ return 0;
+}
+
+static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action, void *data_buf)
@@ -1576,6 +1596,50 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
}
static int
+mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config;
+ struct mwifiex_tdls_init_cs_params *config;
+ struct mwifiex_tdls_config *init_config;
+ u16 len;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ tdls_config->tdls_action = cpu_to_le16(cmd_action);
+ le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+
+ switch (cmd_action) {
+ case ACT_TDLS_CS_ENABLE_CONFIG:
+ init_config = data_buf;
+ len = sizeof(*init_config);
+ memcpy(tdls_config->tdls_data, init_config, len);
+ break;
+ case ACT_TDLS_CS_INIT:
+ config = data_buf;
+ len = sizeof(*config);
+ memcpy(tdls_config->tdls_data, config, len);
+ break;
+ case ACT_TDLS_CS_STOP:
+ len = sizeof(struct mwifiex_tdls_stop_cs_params);
+ memcpy(tdls_config->tdls_data, data_buf, len);
+ break;
+ case ACT_TDLS_CS_PARAMS:
+ len = sizeof(struct mwifiex_tdls_config_cs_params);
+ memcpy(tdls_config->tdls_data, data_buf, len);
+ break;
+ default:
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Unknown TDLS configuration\n");
+ return -ENOTSUPP;
+ }
+
+ le16_add_cpu(&cmd->size, len);
+ return 0;
+}
+
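
[Editor's note: mwifiex_cmd_tdls_config() builds a variable-length firmware command, growing a little-endian size field as each payload block is appended after the action word. A self-contained sketch of that pattern using glibc's htole16()/le16toh(); the struct below is illustrative, not the firmware ABI.]

#include <endian.h>	/* htole16, le16toh */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd_hdr {
	uint16_t command;	/* little-endian on the wire */
	uint16_t size;		/* total length, grown as payload is added */
	uint8_t  payload[64];
};

/* Equivalent of le16_add_cpu(): add a host value to an LE field. */
static void le16_add(uint16_t *le_field, uint16_t add)
{
	*le_field = htole16(le16toh(*le_field) + add);
}

static void cmd_append(struct cmd_hdr *cmd, const void *data, uint16_t len)
{
	/* bytes already used; the fixed header here is 4 bytes */
	uint16_t used = le16toh(cmd->size) - 2 * sizeof(uint16_t);

	memcpy(cmd->payload + used, data, len);
	le16_add(&cmd->size, len);
}

int main(void)
{
	struct cmd_hdr cmd = { .command = htole16(0x0100),
			       .size = htole16(2 * sizeof(uint16_t)) };
	uint16_t action = htole16(0x0006);	/* e.g. ACT_TDLS_CS_INIT */

	cmd_append(&cmd, &action, sizeof(action));
	printf("total size = %u\n", le16toh(cmd.size));	/* 6 */
	return 0;
}
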
+static int
mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
void *data_buf)
@@ -1933,10 +1997,12 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
cmd_ptr->params.bss_mode.con_type =
CONNECTION_TYPE_ADHOC;
- else if (priv->bss_mode == NL80211_IFTYPE_STATION)
+ else if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT)
cmd_ptr->params.bss_mode.con_type =
CONNECTION_TYPE_INFRA;
- else if (priv->bss_mode == NL80211_IFTYPE_AP)
+ else if (priv->bss_mode == NL80211_IFTYPE_AP ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_GO)
cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
cmd_ptr->size = cpu_to_le16(sizeof(struct
host_cmd_ds_set_bss_mode) + S_DS_GEN);
@@ -1958,6 +2024,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_TDLS_OPER:
ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_TDLS_CONFIG:
+ ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
data_buf);
@@ -1966,6 +2036,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_MC_POLICY:
+ ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2082,6 +2156,18 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
}
+
+ if (drcs) {
+ adapter->drcs_enabled = true;
+ if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_MC_POLICY,
+ HostCmd_ACT_GEN_SET, 0,
+ &adapter->drcs_enabled,
+ true);
+ if (ret)
+ return -1;
+ }
}
/* get tx rate */
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index b645884b3b97..89e8dafb4738 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -599,6 +599,7 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
"info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
+ priv->port_open = true;
}
}
@@ -629,6 +630,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
+ priv->port_open = true;
}
}
@@ -1191,12 +1193,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_TDLS_OPER:
ret = mwifiex_ret_tdls_oper(priv, resp);
+ case HostCmd_CMD_MC_POLICY:
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
break;
case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
break;
+ case HostCmd_CMD_TDLS_CONFIG:
+ break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 848de2621958..3d18c585e543 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->media_connected = false;
priv->scan_block = false;
+ priv->port_open = false;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
@@ -153,6 +154,7 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_tdls_generic_event *tdls_evt =
(void *)event_skb->data + sizeof(adapter->event_cause);
+ u8 *mac = tdls_evt->peer_mac;
/* reserved 2 bytes are not mandatory in tdls event */
if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
@@ -175,6 +177,59 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
le16_to_cpu(tdls_evt->u.reason_code),
GFP_KERNEL);
break;
+ case TDLS_EVENT_CHAN_SWITCH_RESULT:
+ mwifiex_dbg(adapter, EVENT, "tdls channel switch result :\n");
+ mwifiex_dbg(adapter, EVENT,
+ "status=0x%x, reason=0x%x cur_chan=%d\n",
+ tdls_evt->u.switch_result.status,
+ tdls_evt->u.switch_result.reason,
+ tdls_evt->u.switch_result.cur_chan);
+
+ /* tdls channel switch failed */
+ if (tdls_evt->u.switch_result.status != 0) {
+ switch (tdls_evt->u.switch_result.cur_chan) {
+ case TDLS_BASE_CHANNEL:
+ sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+ break;
+ case TDLS_OFF_CHANNEL:
+ sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+ break;
+ default:
+ break;
+ }
+ return ret;
+ }
+
+ /* tdls channel switch success */
+ switch (tdls_evt->u.switch_result.cur_chan) {
+ case TDLS_BASE_CHANNEL:
+ if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN)
+ break;
+ mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+ false);
+ sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+ break;
+ case TDLS_OFF_CHANNEL:
+ if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)
+ break;
+ mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+ true);
+ sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case TDLS_EVENT_START_CHAN_SWITCH:
+ mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n");
+ sta_ptr->tdls_status = TDLS_CHAN_SWITCHING;
+ break;
+ case TDLS_EVENT_CHAN_SWITCH_STOPPED:
+ mwifiex_dbg(adapter, EVENT,
+ "tdls chan switch stopped, reason=%d\n",
+ tdls_evt->u.cs_stop_reason);
+ break;
default:
break;
}
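
[Editor's note: the channel-switch result handling above is a small state machine. A failed switch only records which channel the link ended up on; a successful switch additionally pauses traffic to other peers when moving off-channel and unpauses it when returning to the base channel. A compact user-space model of the transitions; the names are assumptions of this sketch.]

#include <stdio.h>

#define TDLS_BASE_CHANNEL 0
#define TDLS_OFF_CHANNEL  1

enum tdls_state { IN_BASE_CHAN, IN_OFF_CHAN };

/* Returns the new link state and sets *pause_others (0/1) only when a
 * successful switch changes channel; -1 leaves the pause state alone. */
static enum tdls_state cs_result(enum tdls_state cur, int status,
				 int chan, int *pause_others)
{
	enum tdls_state next;

	*pause_others = -1;
	if (chan != TDLS_BASE_CHANNEL && chan != TDLS_OFF_CHANNEL)
		return cur;			/* unknown channel: no change */

	next = (chan == TDLS_BASE_CHANNEL) ? IN_BASE_CHAN : IN_OFF_CHAN;
	if (status != 0)			/* failed switch: record only */
		return next;
	if (next != cur)			/* success: (un)pause others */
		*pause_others = (next == IN_OFF_CHAN);
	return next;
}

int main(void)
{
	int pause;
	enum tdls_state s;

	s = cs_result(IN_BASE_CHAN, 0, TDLS_OFF_CHANNEL, &pause);
	printf("state=%d pause_others=%d\n", s, pause);	/* 1 1 */
	return 0;
}
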
@@ -182,6 +237,145 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
return ret;
}
+static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
+ struct mwifiex_ie_types_header *tlv)
+{
+ struct mwifiex_tx_pause_tlv *tp;
+ struct mwifiex_sta_node *sta_ptr;
+ unsigned long flags;
+
+ tp = (void *)tlv;
+ mwifiex_dbg(priv->adapter, EVENT,
+ "uap tx_pause: %pM pause=%d, pkts=%d\n",
+ tp->peermac, tp->tx_pause,
+ tp->pkt_cnt);
+
+ if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) {
+ if (tp->tx_pause)
+ priv->port_open = false;
+ else
+ priv->port_open = true;
+ } else if (is_multicast_ether_addr(tp->peermac)) {
+ mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
+ } else {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+ sta_ptr->tx_pause = tp->tx_pause;
+ mwifiex_update_ralist_tx_pause(priv, tp->peermac,
+ tp->tx_pause);
+ }
+ }
+}
+
+static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
+ struct mwifiex_ie_types_header *tlv)
+{
+ struct mwifiex_tx_pause_tlv *tp;
+ struct mwifiex_sta_node *sta_ptr;
+ int status;
+ unsigned long flags;
+
+ tp = (void *)tlv;
+ mwifiex_dbg(priv->adapter, EVENT,
+ "sta tx_pause: %pM pause=%d, pkts=%d\n",
+ tp->peermac, tp->tx_pause,
+ tp->pkt_cnt);
+
+ if (ether_addr_equal(tp->peermac, priv->cfg_bssid)) {
+ if (tp->tx_pause)
+ priv->port_open = false;
+ else
+ priv->port_open = true;
+ } else {
+ if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return;
+
+ status = mwifiex_get_tdls_link_status(priv, tp->peermac);
+ if (mwifiex_is_tdls_link_setup(status)) {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+ sta_ptr->tx_pause = tp->tx_pause;
+ mwifiex_update_ralist_tx_pause(priv,
+ tp->peermac,
+ tp->tx_pause);
+ }
+ }
+ }
+}
+
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ struct mwifiex_ie_types_multi_chan_info *chan_info;
+ u16 status;
+
+ chan_info = (void *)event_skb->data + sizeof(u32);
+
+ if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unknown TLV in chan_info event\n");
+ return;
+ }
+
+ status = le16_to_cpu(chan_info->status);
+
+ if (status) {
+ mwifiex_dbg(priv->adapter, EVENT,
+ "multi-channel operation started\n");
+ } else {
+ mwifiex_dbg(priv->adapter, EVENT,
+ "multi-channel operation over\n");
+ }
+}
+
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ struct mwifiex_ie_types_header *tlv;
+ u16 tlv_type, tlv_len;
+ int tlv_buf_left;
+
+ if (!priv->media_connected) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tx_pause event while disconnected; bss_role=%d\n",
+ priv->bss_role);
+ return;
+ }
+
+ tlv_buf_left = event_skb->len - sizeof(u32);
+ tlv = (void *)event_skb->data + sizeof(u32);
+
+ while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_len = le16_to_cpu(tlv->len);
+ if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) >
+ tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n",
+ tlv_len, tlv_buf_left);
+ break;
+ }
+ if (tlv_type == TLV_TYPE_TX_PAUSE) {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
+ mwifiex_process_sta_tx_pause(priv, tlv);
+ else
+ mwifiex_process_uap_tx_pause(priv, tlv);
+ }
+
+ tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) +
+ tlv_len;
+ tlv = (void *)((u8 *)tlv + tlv_len +
+ sizeof(struct mwifiex_ie_types_header));
+ }
+
+}
+
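
[Editor's note: mwifiex_process_tx_pause_event() walks a packed TLV list and validates every advertised length against the bytes remaining before touching the value, the standard defence against a malformed firmware event. The same walk, runnable stand-alone; the sample bytes are arbitrary, and fields are treated as host-endian to keep the sketch short.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {		/* little-endian in the real firmware event */
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

static void walk_tlvs(const uint8_t *buf, int buf_left)
{
	while (buf_left >= (int)sizeof(struct tlv_hdr)) {
		struct tlv_hdr h;

		memcpy(&h, buf, sizeof(h));	/* avoid unaligned access */
		if ((int)(sizeof(h) + h.len) > buf_left) {
			printf("truncated tlv: len=%u left=%d\n",
			       h.len, buf_left);
			break;
		}
		printf("tlv type=0x%04x len=%u\n", h.type, h.len);
		buf      += sizeof(h) + h.len;
		buf_left -= sizeof(h) + h.len;
	}
}

int main(void)
{
	/* one well-formed TLV (len 2) followed by a truncated one */
	uint8_t ev[] = { 0x94, 0x01, 0x02, 0x00, 0xaa, 0xbb,
			 0x95, 0x01, 0xff, 0x00 };

	walk_tlvs(ev, sizeof(ev));
	return 0;
}
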
/*
* This function handles coex events generated by firmware
*/
@@ -359,7 +553,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_PS_AWAKE:
mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
- if (!adapter->pps_uapsd_mode &&
+ if (!adapter->pps_uapsd_mode && priv->port_open &&
priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
mwifiex_dbg(adapter, EVENT,
@@ -438,6 +632,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_PORT_RELEASE:
mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
+ priv->port_open = true;
break;
case EVENT_EXT_SCAN_REPORT:
@@ -573,6 +768,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
break;
+ case EVENT_TX_DATA_PAUSE:
+ mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+ mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+ break;
+
+ case EVENT_MULTI_CHAN_INFO:
+ mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+ mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+ break;
+
case EVENT_TX_STATUS_REPORT:
mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 2faa1bc42abe..aa3d3c5ed07b 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -49,7 +49,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
tid = skb->priority;
tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
- if (status == TDLS_SETUP_COMPLETE) {
+ if (mwifiex_is_tdls_link_setup(status)) {
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
ra_list->tdls_link = true;
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -355,6 +355,7 @@ static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
extcap->ieee_hdr.len = 8;
memset(extcap->ext_capab, 0, 8);
extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+ extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH;
if (priv->adapter->is_hw_11ac_capable)
extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
@@ -1071,6 +1072,11 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
}
+ if (sta_ptr->tdls_cap.extcap.ext_capab[3] &
+ WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) {
+ mwifiex_config_tdls_enable(priv);
+ mwifiex_config_tdls_cs_params(priv);
+ }
memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
@@ -1141,7 +1147,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv,
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
list_for_each_entry(sta_ptr, &priv->sta_list, list) {
- if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
+ if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
peer++;
count++;
@@ -1295,7 +1301,7 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
if ((link_status == TDLS_NOT_SETUP) &&
(peer->tdls_status == TDLS_SETUP_INPROGRESS))
peer->failure_count++;
- else if (link_status == TDLS_SETUP_COMPLETE)
+ else if (mwifiex_is_tdls_link_setup(link_status))
peer->failure_count = 0;
peer->tdls_status = link_status;
@@ -1367,7 +1373,7 @@ void mwifiex_check_auto_tdls(unsigned long context)
if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
!tdls_peer->rssi) &&
- tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+ mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) {
tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
mwifiex_dbg(priv->adapter, MSG,
"teardown TDLS link,peer=%pM rssi=%d\n",
@@ -1416,3 +1422,67 @@ void mwifiex_clean_auto_tdls(struct mwifiex_private *priv)
mwifiex_flush_auto_tdls_list(priv);
}
}
+
+static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable)
+{
+ struct mwifiex_tdls_config config;
+
+ config.enable = cpu_to_le16(enable);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true);
+}
+
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv)
+{
+ return mwifiex_config_tdls(priv, true);
+}
+
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv)
+{
+ return mwifiex_config_tdls(priv, false);
+}
+
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv)
+{
+ struct mwifiex_tdls_config_cs_params config_tdls_cs_params;
+
+ config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME;
+ config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK;
+ config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK;
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_PARAMS, 0,
+ &config_tdls_cs_params, true);
+}
+
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac)
+{
+ struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params;
+
+ ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac);
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_STOP, 0,
+ &stop_tdls_cs_params, true);
+}
+
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+ u8 primary_chan, u8 second_chan_offset, u8 band)
+{
+ struct mwifiex_tdls_init_cs_params start_tdls_cs_params;
+
+ ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac);
+ start_tdls_cs_params.primary_chan = primary_chan;
+ start_tdls_cs_params.second_chan_offset = second_chan_offset;
+ start_tdls_cs_params.band = band;
+
+ start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME);
+ start_tdls_cs_params.switch_timeout =
+ cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT);
+ start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS;
+ start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY;
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_INIT, 0,
+ &start_tdls_cs_params, true);
+}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 5ed9b794053e..8b1e5b5d47fe 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -370,8 +370,28 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
/* consumes ack_skb */
skb_complete_wifi_ack(ack_skb, !tx_status->status);
} else {
+ /* Remove the broadcast address which was added by the driver */
+ memmove(ack_skb->data +
+ sizeof(struct ieee80211_hdr_3addr) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16),
+ ack_skb->data +
+ sizeof(struct ieee80211_hdr_3addr) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+ ETH_ALEN, ack_skb->len -
+ (sizeof(struct ieee80211_hdr_3addr) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+ ETH_ALEN));
+ ack_skb->len = ack_skb->len - ETH_ALEN;
+ /* Remove driver's proprietary header including 2 bytes
+ * of packet length and pass actual management frame buffer
+ * to cfg80211.
+ */
cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
- ack_skb->data, ack_skb->len,
+ ack_skb->data +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ sizeof(u16), ack_skb->len -
+ (MWIFIEX_MGMT_FRAME_HEADER_SIZE
+ + sizeof(u16)),
!tx_status->status, GFP_ATOMIC);
dev_kfree_skb_any(ack_skb);
}
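
[Editor's note: the tx-status path above strips two driver-inserted regions before the frame reaches cfg80211: a broadcast address spliced into the body, removed with memmove(), and a proprietary header plus 2-byte length word at the front, skipped by pointer offset. A small model of the memmove() step, with illustrative offsets.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Remove 'cut' bytes starting at 'off' from a buffer of length *len,
 * the way the driver strips its injected broadcast address. */
static void strip_field(uint8_t *buf, size_t *len, size_t off, size_t cut)
{
	memmove(buf + off, buf + off + cut, *len - (off + cut));
	*len -= cut;
}

int main(void)
{
	uint8_t frame[] = "HDRxxxxxxPAYLOAD";	/* 6 junk bytes at offset 3 */
	size_t len = sizeof(frame) - 1;

	strip_field(frame, &len, 3, 6);
	printf("%.*s\n", (int)len, frame);	/* HDRPAYLOAD */
	return 0;
}
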
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index b74930054b8c..4d5a6e3b6361 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -808,7 +808,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef)
{
- u8 config_bands = 0;
+ u8 config_bands = 0, old_bands = priv->adapter->config_bands;
priv->bss_chandef = chandef;
@@ -834,6 +834,11 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
}
priv->adapter->config_bands = config_bands;
+
+ if (old_bands != config_bands) {
+ mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
+ mwifiex_dnld_txpwr_table(priv);
+ }
}
int mwifiex_config_start_uap(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 7bc1f850e3b7..492a8b3c636e 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -176,6 +176,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_BSS_IDLE:
priv->media_connected = false;
+ priv->port_open = false;
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -185,6 +186,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_BSS_ACTIVE:
priv->media_connected = true;
+ priv->port_open = true;
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
@@ -192,6 +194,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
case EVENT_UAP_BSS_START:
mwifiex_dbg(adapter, EVENT,
"AP EVENT: event id: %#x\n", eventcause);
+ priv->port_open = false;
memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
ETH_ALEN);
if (priv->hist_data)
@@ -297,6 +300,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
+ case EVENT_TX_DATA_PAUSE:
+ mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+ mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+ break;
+
+ case EVENT_MULTI_CHAN_INFO:
+ mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+ mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+ break;
+
default:
mwifiex_dbg(adapter, EVENT,
"event: unknown event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index aada93425f80..fbad99c50307 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -244,9 +244,11 @@ setup_for_next:
if (card->rx_cmd_ep == context->ep) {
mwifiex_usb_submit_rx_urb(context, size);
} else {
- context->skb = NULL;
- if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+ if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING){
mwifiex_usb_submit_rx_urb(context, size);
+ }else{
+ context->skb = NULL;
+ }
}
return;
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 790e61953abf..2504e422364a 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -531,6 +531,65 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
return NULL;
}
+static struct mwifiex_sta_node *
+mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status)
+{
+ struct mwifiex_sta_node *node;
+
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (node->tdls_status == status)
+ return node;
+ }
+
+ return NULL;
+}
+
+/* If a tdls channel switch is in progress, tx data traffic should be
+ * blocked until the switch completes.
+ */
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return false;
+
+ sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING);
+ if (sta_ptr)
+ return true;
+
+ return false;
+}
+
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return false;
+
+ sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN);
+ if (sta_ptr)
+ return true;
+
+ return false;
+}
+
+/* If a tdls channel switch is in progress or the tdls link is operating
+ * off-channel, the cmd path should be blocked until tdls switches back
+ * to the base channel.
+ */
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv)
+{
+ if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return true;
+
+ if (mwifiex_is_tdls_chan_switching(priv) ||
+ mwifiex_is_tdls_off_chan(priv))
+ return false;
+
+ return true;
+}
+
/* This function will add a sta_node entry to associated station list
* table with the given mac address.
* If entry exist already, existing entry is returned.
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index a8ea21c3340c..173d3663c2e0 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -160,9 +160,10 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
ra_list->tdls_link = false;
ra_list->ba_status = BA_SETUP_NONE;
ra_list->amsdu_in_ampdu = false;
+ ra_list->tx_paused = false;
if (!mwifiex_queuing_ra_based(priv)) {
- if (mwifiex_get_tdls_link_status(priv, ra) ==
- TDLS_SETUP_COMPLETE) {
+ if (mwifiex_is_tdls_link_setup
+ (mwifiex_get_tdls_link_status(priv, ra))) {
ra_list->tdls_link = true;
ra_list->is_11n_enabled =
mwifiex_tdls_peer_11n_enabled(priv, ra);
@@ -448,6 +449,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
}
}
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
+{
+ return atomic_read(&adapter->bypass_tx_pending) ? false : true;
+}
+
/*
* This function checks if WMM Tx queue is empty.
*/
@@ -459,6 +465,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; ++i) {
priv = adapter->priv[i];
+ if (priv && !priv->port_open)
+ continue;
if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
return false;
}
@@ -580,6 +588,10 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ atomic_set(&priv->adapter->bypass_tx_pending, 0);
+
idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
idr_destroy(&priv->ack_status_frames);
}
@@ -603,6 +615,88 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
return NULL;
}
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+ u8 tx_pause)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ u32 pkt_cnt = 0, tx_pkts_queued;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; ++i) {
+ ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
+ if (ra_list && ra_list->tx_paused != tx_pause) {
+ pkt_cnt += ra_list->total_pkt_count;
+ ra_list->tx_paused = tx_pause;
+ if (tx_pause)
+ priv->wmm.pkts_paused[i] +=
+ ra_list->total_pkt_count;
+ else
+ priv->wmm.pkts_paused[i] -=
+ ra_list->total_pkt_count;
+ }
+ }
+
+ if (pkt_cnt) {
+ tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+ if (tx_pause)
+ tx_pkts_queued -= pkt_cnt;
+ else
+ tx_pkts_queued += pkt_cnt;
+
+ atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+ atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+ }
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/* This function updates non-tdls peer ralist tx_pause while a
+ * tdls channel switch is in progress
+ */
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+ u8 *mac, u8 tx_pause)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ u32 pkt_cnt = 0, tx_pkts_queued;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; ++i) {
+ list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
+ list) {
+ if (!memcmp(ra_list->ra, mac, ETH_ALEN))
+ continue;
+
+ if (ra_list && ra_list->tx_paused != tx_pause) {
+ pkt_cnt += ra_list->total_pkt_count;
+ ra_list->tx_paused = tx_pause;
+ if (tx_pause)
+ priv->wmm.pkts_paused[i] +=
+ ra_list->total_pkt_count;
+ else
+ priv->wmm.pkts_paused[i] -=
+ ra_list->total_pkt_count;
+ }
+ }
+ }
+
+ if (pkt_cnt) {
+ tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+ if (tx_pause)
+ tx_pkts_queued -= pkt_cnt;
+ else
+ tx_pkts_queued += pkt_cnt;
+
+ atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+ atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+ }
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
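
[Editor's note: both pause helpers keep two counters consistent: per-TID pkts_paused absorbs packets parked on a paused RA list, while tx_pkts_queued counts only packets the scheduler may actually transmit. A toy model of that bookkeeping, using one TID and plain ints instead of atomics and spinlocks.]

#include <stdbool.h>
#include <stdio.h>

struct ra_list { int pkts; bool paused; };

static int tx_pkts_queued, pkts_paused;

static void set_pause(struct ra_list *ra, bool pause)
{
	if (ra->paused == pause)
		return;
	ra->paused = pause;
	if (pause) {
		pkts_paused    += ra->pkts;
		tx_pkts_queued -= ra->pkts;	/* hidden from scheduler */
	} else {
		pkts_paused    -= ra->pkts;
		tx_pkts_queued += ra->pkts;	/* visible again */
	}
}

int main(void)
{
	struct ra_list ra = { .pkts = 5 };

	tx_pkts_queued = 5;
	set_pause(&ra, true);
	printf("queued=%d paused=%d\n", tx_pkts_queued, pkts_paused); /* 0 5 */
	set_pause(&ra, false);
	printf("queued=%d paused=%d\n", tx_pkts_queued, pkts_paused); /* 5 0 */
	return 0;
}
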
/*
* This function retrieves an RA list node for a given TID and
* RA address pair.
@@ -670,6 +764,18 @@ mwifiex_is_ralist_valid(struct mwifiex_private *priv,
}
/*
+ * This function adds a packet to the bypass TX queue. This is a special
+ * TX queue for packets which can be sent even when port_open is false.
+ */
+void
+mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ skb_queue_tail(&priv->bypass_txq, skb);
+}
+
+/*
* This function adds a packet to WMM queue.
*
* In disconnected state the packet is immediately dropped and the
@@ -723,6 +829,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
!mwifiex_is_skb_mgmt_frame(skb)) {
switch (tdls_status) {
case TDLS_SETUP_COMPLETE:
+ case TDLS_CHAN_SWITCHING:
+ case TDLS_IN_BASE_CHAN:
+ case TDLS_IN_OFF_CHAN:
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
ra);
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -765,7 +874,10 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
atomic_set(&priv->wmm.highest_queued_prio,
priv->tos_to_tid_inv[tid_down]);
- atomic_inc(&priv->wmm.tx_pkts_queued);
+ if (ra_list->tx_paused)
+ priv->wmm.pkts_paused[tid_down]++;
+ else
+ atomic_inc(&priv->wmm.tx_pkts_queued);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
@@ -970,7 +1082,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
- if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
+ if (!priv_tmp->port_open ||
+ (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
continue;
/* iterate over the WMM queues of the BSS */
@@ -987,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
list_for_each_entry(ptr, &tid_ptr->ra_list,
list) {
- if (!skb_queue_empty(&ptr->skb_head))
+ if (!ptr->tx_paused &&
+ !skb_queue_empty(&ptr->skb_head))
/* holds both locks */
goto found;
}
@@ -1339,6 +1453,38 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
return 0;
}
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
+{
+ struct mwifiex_tx_param tx_param;
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct mwifiex_private *priv;
+ int i;
+
+ if (adapter->data_sent || adapter->tx_lock_flag)
+ return;
+
+ for (i = 0; i < adapter->priv_num; ++i) {
+ priv = adapter->priv[i];
+
+ if (skb_queue_empty(&priv->bypass_txq))
+ continue;
+
+ skb = skb_dequeue(&priv->bypass_txq);
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+
+ /* no aggregation for bypass packets */
+ tx_param.next_pkt_len = 0;
+
+ if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
+ skb_queue_head(&priv->bypass_txq, skb);
+ tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
+ } else {
+ atomic_dec(&adapter->bypass_tx_pending);
+ }
+ }
+}
+
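
[Editor's note: mwifiex_process_bypass_tx() sends at most one bypass packet per interface per pass and, on -EBUSY, puts the packet back at the head of the queue so ordering is preserved for the retry. A sketch of that requeue discipline over a trivial ring buffer; bus_busy() is a stand-in for the bus driver returning -EBUSY.]

#include <stdbool.h>
#include <stdio.h>

#define QSZ 8
static int q[QSZ];
static int head, tail;			/* simple ring buffer */

static bool bus_busy(int pkt) { return pkt == 2; }	/* pretend EBUSY */

static void process_bypass(void)
{
	while (head != tail) {
		int pkt = q[head];

		if (bus_busy(pkt)) {
			/* leave at the queue head and retry next pass,
			 * like skb_queue_head() after -EBUSY above */
			printf("pkt %d busy, requeued\n", pkt);
			break;
		}
		head = (head + 1) % QSZ;
		printf("pkt %d sent\n", pkt);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		q[tail++] = i;
	process_bypass();		/* sends 0, 1 then stops at 2 */
	return 0;
}
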
/*
* This function transmits the highest priority packet awaiting in the
* WMM Queues.
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 48ece0b35591..38f09762bd2f 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,12 +99,16 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct sk_buff *skb);
+void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+ struct sk_buff *skb);
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra, int tid);
int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter);
void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter);
int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list, int tid);
@@ -126,6 +130,10 @@ struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
const u8 *ra_addr);
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+ u8 tx_pause);
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+ u8 *mac, u8 tx_pause);
struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
*priv, u8 tid, const u8 *ra_addr);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
index c940a87175ca..74a479ac323d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -32,24 +32,15 @@
/*-------------------------------------------------------------------------
* Chip specific
*-------------------------------------------------------------------------*/
-#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
-#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
#define NORMAL_CHIP BIT(4)
#define CHIP_VENDOR_UMC BIT(5)
#define CHIP_VENDOR_UMC_B_CUT BIT(6)
-#define IS_8723_SERIES(version) \
- (((version) & CHIP_8723) ? true : false)
-
#define IS_92C_1T2R(version) \
(((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
#define IS_VENDOR_UMC(version) \
(((version) & CHIP_VENDOR_UMC) ? true : false)
-#define IS_VENDOR_8723_A_CUT(version) \
- (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
- false : true) : false)
-
#define CHIP_BONDING_92C_1T2R 0x1
#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 767358a553fb..7cf36619f250 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -2280,7 +2280,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
u8 u1tmp = 0;
bool actuallyset = false;
@@ -2357,20 +2356,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
/* Enable register area 0x0-0xc. */
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
- if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
- /*
- * We should configure HW PDn source for WiFi
- * ONLY, and then our HW will be set in
- * power-down mode if PDn source from all
- * functions are configured.
- */
- u1tmp = rtl_read_byte(rtlpriv,
- REG_MULTI_FUNC_CTRL);
- rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
- (u1tmp|WL_HWPDN_EN));
- } else {
- rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
- }
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
}
if (e_rfpowerstate_toset == ERFOFF) {
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 490a7cf7c702..1c55a002d4bd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -69,8 +69,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
chip_version = NORMAL_CHIP;
chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
- /* RTL8723 with BT function. */
- chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
if (IS_VENDOR_UMC(chip_version))
chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
CHIP_VENDOR_UMC_B_CUT : 0);
@@ -78,10 +76,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
- } else if (IS_8723_SERIES(chip_version)) {
- value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
- chip_version |= ((value32 & RF_RL_ID) ?
- CHIP_8723_DRV_REV : 0);
}
}
rtlhal->version = (enum version_8192c)chip_version;
@@ -114,12 +108,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
break;
- case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
- versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT";
- break;
- case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
- versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT";
- break;
case VERSION_TEST_CHIP_92C:
versionid = "TEST_CHIP_92C";
break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 1961b8e28dc1..bb06fe836fe7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3515,14 +3515,14 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
rfpath++) {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
BIT(18), 0);
/* RF0x0b[16:14] =3b'111 */
rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
0x1c000, 0x07);
} else {
- /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) |
BIT(16) | BIT(18),
(BIT(16) | BIT(8)) >> 8);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 3236d44b459d..b7f18e2155eb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, MSR, bt_msr);
rtlpriv->cfg->ops->led_control(hw, ledaction);
- if ((bt_msr & 0xfc) == MSR_AP)
+ if ((bt_msr & MSR_MASK) == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
else
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
index 53668fc8f23e..1d6110f9c1fb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
@@ -429,6 +429,7 @@
#define MSR_ADHOC 0x01
#define MSR_INFRA 0x02
#define MSR_AP 0x03
+#define MSR_MASK 0x03
#define RRSR_RSC_OFFSET 21
#define RRSR_SHORT_OFFSET 23
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index e125974285cc..7df672a84530 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -74,7 +74,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
if (desc->rate <= wl->hw_min_ht_rate)
status->flag |= RX_FLAG_HT;
- status->signal = desc->rssi;
+ status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
+ status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
/*
* FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index a3b1618db27c..f5a7087cfb97 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -30,6 +30,9 @@
#define WL1271_RX_MAX_RSSI -30
#define WL1271_RX_MIN_RSSI -95
+#define RSSI_LEVEL_BITMASK 0x7F
+#define ANT_DIVERSITY_BITMASK BIT(7)
+
#define SHORT_PREAMBLE_BIT BIT(0)
#define OFDM_RATE_BIT BIT(6)
#define PBCC_RATE_BIT BIT(7)
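
[Editor's note: the wlcore change splits the firmware's rssi byte in two: bits 0-6 carry the signal level and bit 7 records the antenna used for diversity. OR-ing BIT(7) back into the level keeps the reported signal in the negative dBm range that mac80211 expects. A quick user-space check of the bit arithmetic:]

#include <stdint.h>
#include <stdio.h>

#define RSSI_LEVEL_BITMASK    0x7F
#define ANT_DIVERSITY_BITMASK (1u << 7)

int main(void)
{
	uint8_t desc_rssi = 0xB5;	/* antenna 1, level bits 0x35 */

	/* setting BIT(7) forces the 7-bit level into the negative dBm
	 * range, as wl1271_rx_status() does above */
	int8_t  signal  = (int8_t)((desc_rssi & RSSI_LEVEL_BITMASK) | (1u << 7));
	uint8_t antenna = (desc_rssi & ANT_DIVERSITY_BITMASK) >> 7;

	printf("signal=%d dBm antenna=%u\n", signal, antenna);	/* -75, 1 */
	return 0;
}
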
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index ea7e07abca4e..c172da56b550 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -293,7 +293,8 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
+ ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+ if (ret)
goto out_free_glue;
/* if sdio can keep power while host is suspended, enable wow */
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a495b318b6f..c6cb85a85c89 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -325,9 +325,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
queue->pending_prod + queue->pending_cons;
}
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fdc60db60829..7c8c23cc6896 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -266,7 +266,8 @@ EXPORT_SYMBOL(of_phy_attach);
bool of_phy_is_fixed_link(struct device_node *np)
{
struct device_node *dn;
- int len;
+ int len, err;
+ const char *managed;
/* New binding */
dn = of_get_child_by_name(np, "fixed-link");
@@ -275,6 +276,10 @@ bool of_phy_is_fixed_link(struct device_node *np)
return true;
}
+ err = of_property_read_string(np, "managed", &managed);
+ if (err == 0 && strcmp(managed, "auto") != 0)
+ return true;
+
/* Old binding */
if (of_get_property(np, "fixed-link", &len) &&
len == (5 * sizeof(__be32)))
@@ -289,8 +294,18 @@ int of_phy_register_fixed_link(struct device_node *np)
struct fixed_phy_status status = {};
struct device_node *fixed_link_node;
const __be32 *fixed_link_prop;
- int len;
+ int len, err;
struct phy_device *phy;
+ const char *managed;
+
+ err = of_property_read_string(np, "managed", &managed);
+ if (err == 0) {
+ if (strcmp(managed, "in-band-status") == 0) {
+ /* status is zeroed, namely its .link member */
+ phy = fixed_phy_register(PHY_POLL, &status, np);
+ return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+ }
+ }
/* New binding */
fixed_link_node = of_get_child_by_name(np, "fixed-link");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 98d06d151958..d5cdc4776707 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2051,9 +2051,49 @@ static int bnx2fc_disable(struct net_device *netdev)
return rc;
}
+static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
+ struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+ struct fc_vport_identifiers vpid;
+ uint i, created = 0;
+
+ if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
+ BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
+ goto done;
+ }
+
+ /* Sanity check the first entry to make sure it's not 0 */
+ if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 &&
+ wwn_to_u64(npiv_tbl->wwpn[0]) == 0) {
+ BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n");
+ goto done;
+ }
+
+ vpid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vpid.vport_type = FC_PORTTYPE_NPIV;
+ vpid.disable = false;
+
+ for (i = 0; i < npiv_tbl->count; i++) {
+ vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+ vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
+ scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
+ "NPIV[%u]:%016llx-%016llx",
+ created, vpid.port_name, vpid.node_name);
+ if (fc_vport_create(lport->host, 0, &vpid))
+ created++;
+ else
+ BNX2FC_HBA_DBG(lport, "Failed to create vport\n");
+ }
+done:
+ return created;
+}
+
static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
{
struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+ struct bnx2fc_hba *hba;
+ struct cnic_fc_npiv_tbl npiv_tbl;
+ struct fc_lport *lport;
if (interface->enabled == false) {
if (!ctlr->lp) {
@@ -2064,6 +2104,32 @@ static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
interface->enabled = true;
}
}
+
+ /* Create static NPIV ports if any are contained in NVRAM */
+ hba = interface->hba;
+ lport = ctlr->lp;
+
+ if (!hba)
+ goto done;
+
+ if (!hba->cnic)
+ goto done;
+
+ if (!lport)
+ goto done;
+
+ if (!lport->host)
+ goto done;
+
+ if (!hba->cnic->get_fc_npiv_tbl)
+ goto done;
+
+ memset(&npiv_tbl, 0, sizeof(npiv_tbl));
+ if (hba->cnic->get_fc_npiv_tbl(hba->cnic, &npiv_tbl))
+ goto done;
+
+ bnx2fc_npiv_create_vports(lport, &npiv_tbl);
+done:
return 0;
}
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476a0d48..f57d7fed9ec3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
+#include <linux/perf_event.h>
struct bpf_map;
@@ -24,6 +25,10 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+ /* funcs called by prog_array and perf_event_array map */
+ void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+ void (*map_fd_put_ptr) (void *ptr);
};
struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
bool owner_jited;
union {
char value[0] __aligned(8);
- struct bpf_prog *prog[0] __aligned(8);
+ void *ptrs[0] __aligned(8);
};
};
#define MAX_TAIL_CALL_CNT 32
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_prog_array_map_clear(struct bpf_map *map);
+void bpf_fd_array_map_clear(struct bpf_map *map);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6ea983..fa2cab985e57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
offsetof(struct bpf_prog, insns[proglen]));
}
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+ /* When classic BPF programs have been loaded and the arch
+ * does not have a classic BPF JIT (anymore), they have been
+ * converted via bpf_migrate_filter() to eBPF and thus always
+ * have an unspec program type.
+ */
+ return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
- flen, proglen, pass, image);
+ pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ proglen, pass, image, current->comm, task_pid_nr(current));
+
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c60aa42..cb9dcad72372 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,6 +29,7 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
#ifdef CONFIG_IPV6_ROUTER_PREF
__s32 accept_ra_rtr_pref;
@@ -57,6 +58,7 @@ struct ipv6_devconf {
bool initialized;
struct in6_addr secret;
} stable_secret;
+ __s32 use_oif_addrs_only;
void *sysctl;
};
@@ -94,7 +96,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
struct inet6_skb_parm {
int iif;
__be16 ra;
- __u16 hop;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
@@ -111,6 +112,7 @@ struct inet6_skb_parm {
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
+#define IP6SKB_HOPBYHOP 32
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12a1163..09cebe528488 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
enum {
MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
- MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
MLX4_CQE_L2_TUNNEL = 1 << 27,
MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1ce3b4a..bcbf8c72a77b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -211,6 +211,8 @@ enum {
MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
+ MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
};
enum {
@@ -581,6 +583,7 @@ struct mlx4_caps {
u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode;
u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 phv_bit[MLX4_MAX_PORTS + 1];
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539e5456..de45a51b3f04 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@ enum {
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
- MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9e2097..250b1ff8b48d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1182,6 +1182,16 @@ enum {
MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};
+enum {
+ MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
+ MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
+ MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
+ MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
+ MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+ MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
+ MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88c2429..2039546b0ec6 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,7 @@ enum {
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PPCNT = 0x5008,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
@@ -380,7 +381,7 @@ struct mlx5_uar {
u32 index;
struct list_head bf_list;
unsigned free_bf_bmap;
- void __iomem *wc_map;
+ void __iomem *bf_map;
void __iomem *map;
};
@@ -435,6 +436,8 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
+ struct io_mapping *bf_mapping;
+
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -463,6 +466,10 @@ struct mlx5_priv {
/* end: mr staff */
/* start: alloc staff */
+ /* protect buffer allocation according to NUMA node */
+ struct mutex alloc_mutex;
+ int numa_node;
+
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc staff */
@@ -672,6 +679,8 @@ void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -773,6 +782,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+ int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fee041c..dd2097455a2e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@ enum {
};
enum {
- MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
- MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+ MLX5_RX_HASH_FN_NONE = 0x0,
+ MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+ MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
};
enum {
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
struct mlx5_ifc_tisc_bits ctx;
};
+struct mlx5_ifc_modify_tir_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 lro[0x1];
+};
+
struct mlx5_ifc_modify_tir_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
u8 reserved_4[0x40];
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_rqt_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 rqn_list[0x1];
+};
+
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_rqt_bitmask_bits bitmask;
u8 reserved_4[0x40];
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..ef29eb2d6dfd
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa06852771..049d4b03c4c4 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...) \
+ do { \
+ if (0) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
+#endif
bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *done_key);
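The DEBUG-off branch added to net_dbg_ratelimited() above compiles to nothing while still type-checking its arguments. A minimal user-space sketch of the same "if (0)" idiom, with printf standing in for the ratelimited pr_debug (a sketch only; names illustrative):

#include <stdio.h>

/* The branch is never taken, so no output code is emitted, yet the
 * compiler still validates the format string against its arguments,
 * so broken callers fail to build even with debugging compiled out.
 */
#define dbg_ratelimited(fmt, ...)                       \
        do {                                            \
                if (0)                                  \
                        printf(fmt, ##__VA_ARGS__);     \
        } while (0)

int main(void)
{
        dbg_ratelimited("rx dropped: %d\n", 3); /* checked, compiled out */
        return 0;
}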
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979dfd6a9..607b5f41f46f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -766,6 +766,13 @@ struct netdev_phys_item_id {
unsigned char id_len;
};
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+ struct netdev_phys_item_id *b)
+{
+ return a->id_len == b->id_len &&
+ memcmp(a->id, b->id, a->id_len) == 0;
+}
+
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
@@ -1041,6 +1048,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
+ * void (*ndo_change_proto_down)(struct net_device *dev,
+ * bool proto_down);
+ * This function is used to pass protocol port error state information
+ * to the switch driver. The switch driver can react to the proto_down
+ * by doing a phys down on the associated switch port.
+ *
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
+ int (*ndo_change_proto_down)(struct net_device *dev,
+ bool proto_down);
};
/**
@@ -1448,6 +1463,8 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
+ * @offload_fwd_mark: Offload device fwding mark
+ *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1502,6 +1519,10 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
+ * @proto_down: protocol port state information can be sent to the
+ * switch driver and used to set the phys state of the
+ * switch port.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1685,6 +1706,10 @@ struct net_device {
struct xps_dev_maps __rcu *xps_maps;
#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ u32 offload_fwd_mark;
+#endif
+
/* These may be needed for future network-power-down code. */
/*
@@ -1762,6 +1787,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2982,6 +3008,7 @@ int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050dfd9f23..d788ce62d826 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@ struct nf_sockopt_ops {
};
/* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#else
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#endif
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
-{
- return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
-}
-
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
@@ -172,10 +174,13 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct sock *, struct sk_buff *),
int thresh)
{
- if (nf_hooks_active(pf, hook)) {
+ struct net *net = dev_net(indev ? indev : outdev);
+ struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+ if (nf_hook_list_active(hook_list, pf, hook)) {
struct nf_hook_state state;
- nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+ nf_hook_state_init(&state, hook_list, hook, thresh,
pf, indev, outdev, sk, okfn);
return nf_hook_slow(skb, &state);
}
@@ -385,4 +390,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When an xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by the xtables TEE target to prevent the duplicated skb
+ * from being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
#endif /*__LINUX_NETFILTER_H*/
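The per-CPU nf_skb_duplicated flag declared above is how the TEE target breaks the recursion it documents. A rough user-space analog of that reentrancy guard, with a thread-local flag standing in for the per-CPU variable (all names here are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool skb_duplicated; /* stand-in for the per-CPU flag */

static void xmit(const char *pkt);

static void tee_duplicate(const char *pkt)
{
        if (skb_duplicated)     /* already a duplicate: do not clone again */
                return;
        skb_duplicated = true;
        xmit(pkt);              /* re-enters the hooks; they see the flag */
        skb_duplicated = false;
}

static void xmit(const char *pkt)
{
        printf("xmit %s\n", pkt);
        tee_duplicate(pkt);     /* hook traversal invokes TEE again */
}

int main(void)
{
        xmit("original");       /* transmitted twice, duplicated once */
        return 0;
}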
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a5667f..b006b719183f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
#include <linux/netdevice.h>
+#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
@@ -222,7 +223,6 @@ struct xt_table_info {
* @stacksize jumps (number of user chains) can possibly be made.
*/
unsigned int stacksize;
- unsigned int __percpu *stackptr;
void ***jumpstack;
unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
/**
* xt_write_recseq_begin - start of a write section
*
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc686323..2437b8a5d7a9 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_NF_BRIDGE_PREROUTING 0x08
-
int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return false;
+}
#endif /* CONFIG_BRIDGE_NETFILTER */
#endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809433b3..092a0e8a479a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
+static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
+static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
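The !CONFIG_PERF_EVENTS stubs above hand back ERR_PTR(-EINVAL). For reference, a self-contained sketch of the ERR_PTR encoding they rely on, mirroring the kernel's include/linux/err.h (simplified for user space):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* the top page of the address space is reserved for errno values */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *p = ERR_PTR(-22);         /* -EINVAL */

        if (IS_ERR(p))
                printf("error: %ld\n", PTR_ERR(p));
        return 0;
}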
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f84b8dd..e5fb1d415961 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -424,6 +424,8 @@ struct phy_device {
struct net_device *attached_dev;
+ u8 mdix;
+
void (*adjust_link)(struct net_device *dev);
};
#define to_phy_device(d) container_of(d, struct phy_device, dev)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 22b6d9ca1654..065e10b81a77 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
+#include <net/flow.h>
/* A. Checksumming of received packets by device.
*
@@ -173,17 +174,24 @@ struct nf_bridge_info {
BRNF_PROTO_8021Q,
BRNF_PROTO_PPPOE
} orig_proto:8;
- bool pkt_otherhost;
+ u8 pkt_otherhost:1;
+ u8 in_prerouting:1;
+ u8 bridged_dnat:1;
__u16 frag_max_size;
- unsigned int mask;
struct net_device *physindev;
union {
- struct net_device *physoutdev;
- char neigh_header[8];
- };
- union {
+ /* prerouting: detect dnat in orig/reply direction */
__be32 ipv4_daddr;
struct in6_addr ipv6_daddr;
+
+ /* after prerouting + nat detected: store original source
+ * mac since neigh resolution overwrites it, only used while
+ * skb is out in neigh layer.
+ */
+ char neigh_header[8];
+
+ /* always valid & non-NULL from FORWARD on, for physdev match */
+ struct net_device *physoutdev;
};
};
#endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
+ * @offload_fwd_mark: fwding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
+ union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
+ __u32 secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ __u32 offload_fwd_mark;
#endif
+ };
+
union {
__u32 mark;
__u32 reserved_tailroom;
@@ -938,6 +953,26 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6);
+
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
+{
+ if (!skb->l4_hash && !skb->sw_hash)
+ __skb_get_hash_flowi6(skb, fl6);
+
+ return skb->hash;
+}
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
+{
+ if (!skb->l4_hash && !skb->sw_hash)
+ __skb_get_hash_flowi4(skb, fl4);
+
+ return skb->hash;
+}
+
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
@@ -2671,12 +2706,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
skb_shinfo(skb)->frag_list = NULL;
}
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
- frag->next = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = frag;
-}
-
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
@@ -3468,5 +3497,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
skb_network_header(skb);
return hdr_len + skb_gso_transport_seglen(skb);
}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c91eea..eead8ab93c0a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void *custom_cfg;
- void *custom_data;
void *bsp_priv;
};
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
- int has_gmac;
- int enh_desc;
- int tx_coe;
- int rx_coe;
- int bugged_jumbo;
- int pmt;
- int riwt_off;
- void (*fix_mac_speed)(void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
-};
#endif
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 931738bc5bba..4519c81304bd 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -21,6 +21,8 @@ struct tcf_common {
struct gnet_stats_rate_est64 tcfc_rate_est;
spinlock_t tcfc_lock;
struct rcu_head tcfc_rcu;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
};
#define tcf_head common.tcfc_head
#define tcf_index common.tcfc_index
@@ -68,6 +70,17 @@ static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
kfree(hf->htab);
}
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+ unsigned long now = jiffies;
+
+ if (tm->lastuse != now)
+ tm->lastuse = now;
+}
+
#ifdef CONFIG_NET_CLS_ACT
#define ACT_P_CREATED 1
@@ -102,7 +115,7 @@ void tcf_hash_destroy(struct tc_action *a);
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
int tcf_hash_check(u32 index, struct tc_action *a, int bind);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
- int size, int bind);
+ int size, int bind, bool cpustats);
void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
void tcf_hash_insert(struct tc_action *a);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index def59d3a34d5..0c3ac5acb85f 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -158,8 +158,8 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
- int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
- struct flowi6 *fl6);
+ int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+ struct dst_entry **dst, struct flowi6 *fl6);
void (*udpv6_encap_enable)(void);
void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *daddr,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3bd618d3e55d..2a6b0919e23f 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1297,7 +1297,7 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
if (max >= to_multiplier * 8)
return -EINVAL;
- max_latency = (to_multiplier * 8 / max) - 1;
+ max_latency = (to_multiplier * 4 / max) - 1;
if (latency > 499 || latency > max_latency)
return -EINVAL;
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 2239a3753092..c98afc08cc26 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -55,6 +55,8 @@
#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
#define L2CAP_A2MP_DEFAULT_MTU 670
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index c28aca25320e..1797235cd590 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -66,6 +66,7 @@ enum {
BOND_OPT_AD_ACTOR_SYS_PRIO,
BOND_OPT_AD_ACTOR_SYSTEM,
BOND_OPT_AD_USER_PORT_KEY,
+ BOND_OPT_NUM_PEER_NOTIF_ALIAS,
BOND_OPT_LAST
};
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 290a9a69af07..382f94b59f2f 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -34,6 +34,8 @@ struct cfg802154_ops {
int type);
void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
struct net_device *dev);
+ int (*suspend)(struct wpan_phy *wpan_phy);
+ int (*resume)(struct wpan_phy *wpan_phy);
int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
const char *name,
unsigned char name_assign_type,
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d39456e14..ccd6d8bffa4d 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
if (classid != sk->sk_classid)
sk->sk_classid = classid;
}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ u32 classid = task_cls_state(current)->classid;
+
+ /* Due to the nature of the classifier it is required to ignore all
+ * packets originating from softirq context as accessing `current'
+ * would lead to false results.
+ *
+ * This test assumes that all callers of dev_queue_xmit() explicitly
+ * disable bh. Knowing this, it is possible to detect softirq-based
+ * calls by looking at the number of nested bh disable calls because
+ * softirqs always disable bh.
+ */
+ if (in_serving_softirq()) {
+ /* If there is an sk_classid we'll use that. */
+ if (!skb->sk)
+ return 0;
+
+ classid = skb->sk->sk_classid;
+ }
+
+ return classid;
+}
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock *sk)
{
}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ return 0;
+}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fbca63ba8f73..6356f437e911 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -296,12 +296,17 @@ struct dsa_switch_driver {
u32 br_port_mask);
int (*port_stp_update)(struct dsa_switch *ds, int port,
u8 state);
- int (*fdb_add)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
- int (*fdb_del)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
- int (*fdb_getnext)(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static);
+
+ /*
+ * Forwarding database
+ */
+ int (*port_fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*port_fdb_del)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*port_fdb_getnext)(struct dsa_switch *ds, int port,
+ unsigned char *addr, u16 *vid,
+ bool *is_static);
};
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst.h b/include/net/dst.h
index 2bc73f8a00a9..2578811cef51 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -57,6 +57,7 @@ struct dst_entry {
#define DST_FAKE_RTABLE 0x0040
#define DST_XFRM_TUNNEL 0x0080
#define DST_XFRM_QUEUE 0x0100
+#define DST_METADATA 0x0200
unsigned short pending_confirm;
@@ -356,6 +357,9 @@ static inline int dst_discard(struct sk_buff *skb)
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+ struct net_device *dev, int initial_ref, int initial_obsolete,
+ unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);
@@ -457,7 +461,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
-void dst_init(void);
+void dst_subsys_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644
index 000000000000..075f523ff23f
--- /dev/null
+++ b/include/net/dst_metadata.h
@@ -0,0 +1,56 @@
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/dst.h>
+
+struct metadata_dst {
+ struct dst_entry dst;
+ size_t opts_len;
+ union {
+ struct ip_tunnel_info tun_info;
+ } u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+ if (md_dst && md_dst->dst.flags & DST_METADATA)
+ return md_dst;
+
+ return NULL;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb,
+ int family)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct rtable *rt;
+
+ if (md_dst)
+ return &md_dst->u.tun_info;
+
+ switch (family) {
+ case AF_INET:
+ rt = (struct rtable *)skb_dst(skb);
+ if (rt && rt->rt_lwtstate)
+ return lwt_tun_info(rt->rt_lwtstate);
+ break;
+ }
+
+ return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ return dst && !(dst->flags & DST_METADATA);
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+#endif /* __NET_DST_METADATA_H */
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 903a55efbffe..4e8f804f4589 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -19,6 +19,7 @@ struct fib_rule {
u8 action;
/* 3 bytes hole, try to use */
u32 target;
+ __be64 tun_id;
struct fib_rule __rcu *ctarget;
struct net *fr_net;
diff --git a/include/net/flow.h b/include/net/flow.h
index 8109a159d1b3..3098ae33a178 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -19,6 +19,10 @@
#define LOOPBACK_IFINDEX 1
+struct flowi_tunnel {
+ __be64 tun_id;
+};
+
struct flowi_common {
int flowic_oif;
int flowic_iif;
@@ -30,6 +34,7 @@ struct flowi_common {
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
__u32 flowic_secid;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
@@ -66,6 +71,7 @@ struct flowi4 {
#define flowi4_proto __fl_common.flowic_proto
#define flowi4_flags __fl_common.flowic_flags
#define flowi4_secid __fl_common.flowic_secid
+#define flowi4_tun_key __fl_common.flowic_tun_key
/* (saddr,daddr) must be grouped, same order as in IP header */
__be32 saddr;
@@ -95,6 +101,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->flowi4_proto = proto;
fl4->flowi4_flags = flags;
fl4->flowi4_secid = 0;
+ fl4->flowi4_tun_key.tun_id = 0;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
@@ -165,6 +172,7 @@ struct flowi {
#define flowi_proto u.__fl_common.flowic_proto
#define flowi_flags u.__fl_common.flowic_flags
#define flowi_secid u.__fl_common.flowic_secid
+#define flowi_tun_key u.__fl_common.flowic_tun_key
} __attribute__((__aligned__(BITS_PER_LONG/8)));
static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
diff --git a/include/net/gre.h b/include/net/gre.h
index b53182018743..97eafdc47eea 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -4,6 +4,12 @@
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
#define GREPROTO_CISCO 0
#define GREPROTO_PPTP 1
#define GREPROTO_MAX 2
@@ -14,91 +20,9 @@ struct gre_protocol {
void (*err_handler)(struct sk_buff *skb, u32 info);
};
-struct gre_base_hdr {
- __be16 flags;
- __be16 protocol;
-};
-#define GRE_HEADER_SECTION 4
-
int gre_add_protocol(const struct gre_protocol *proto, u8 version);
int gre_del_protocol(const struct gre_protocol *proto, u8 version);
-struct gre_cisco_protocol {
- int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
- int (*err_handler)(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi);
- u8 priority;
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *proto);
-int gre_cisco_unregister(struct gre_cisco_protocol *proto);
-
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
- int hdr_len);
-
-static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
- bool csum)
-{
- return iptunnel_handle_offloads(skb, csum,
- csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
-}
-
-
-static inline int ip_gre_calc_hlen(__be16 o_flags)
-{
- int addend = 4;
-
- if (o_flags&TUNNEL_CSUM)
- addend += 4;
- if (o_flags&TUNNEL_KEY)
- addend += 4;
- if (o_flags&TUNNEL_SEQ)
- addend += 4;
- return addend;
-}
-
-static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
- __be16 tflags = 0;
-
- if (flags & GRE_CSUM)
- tflags |= TUNNEL_CSUM;
- if (flags & GRE_ROUTING)
- tflags |= TUNNEL_ROUTING;
- if (flags & GRE_KEY)
- tflags |= TUNNEL_KEY;
- if (flags & GRE_SEQ)
- tflags |= TUNNEL_SEQ;
- if (flags & GRE_STRICT)
- tflags |= TUNNEL_STRICT;
- if (flags & GRE_REC)
- tflags |= TUNNEL_REC;
- if (flags & GRE_VERSION)
- tflags |= TUNNEL_VERSION;
-
- return tflags;
-}
-
-static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
- __be16 flags = 0;
-
- if (tflags & TUNNEL_CSUM)
- flags |= GRE_CSUM;
- if (tflags & TUNNEL_ROUTING)
- flags |= GRE_ROUTING;
- if (tflags & TUNNEL_KEY)
- flags |= GRE_KEY;
- if (tflags & TUNNEL_SEQ)
- flags |= GRE_SEQ;
- if (tflags & TUNNEL_STRICT)
- flags |= GRE_STRICT;
- if (tflags & TUNNEL_REC)
- flags |= GRE_REC;
- if (tflags & TUNNEL_VERSION)
- flags |= GRE_VERSION;
-
- return flags;
-}
-
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+ u8 name_assign_type);
#endif
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b73c88a19dd4..b07d126694a7 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -205,8 +205,8 @@ void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+void __inet_hash(struct sock *sk, struct sock *osk);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 360c4802288d..879d6e5a973b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -100,10 +100,8 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
void inet_twsk_free(struct inet_timewait_sock *tw);
void inet_twsk_put(struct inet_timewait_sock *tw);
-int inet_twsk_unhash(struct inet_timewait_sock *tw);
-
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
@@ -113,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo);
void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/ip.h b/include/net/ip.h
index d5fe9f2ab699..bee5f3582e38 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -370,22 +370,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
-static inline void inet_set_txhash(struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
- struct flow_keys keys;
-
- memset(&keys, 0, sizeof(keys));
-
- keys.addrs.v4addrs.src = inet->inet_saddr;
- keys.addrs.v4addrs.dst = inet->inet_daddr;
- keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
- keys.ports.src = inet->inet_sport;
- keys.ports.dst = inet->inet_dport;
-
- sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
const struct iphdr *iph = skb_gro_network_header(skb);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3b76849c190f..276328e3daa6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -51,6 +51,8 @@ struct fib6_config {
struct nlattr *fc_mp;
struct nl_info fc_nlinfo;
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
};
struct fib6_node {
@@ -131,6 +133,7 @@ struct rt6_info {
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
+ struct lwtunnel_state *rt6i_lwtstate;
};
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5fa643b4e891..a37d0432bebd 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -44,7 +44,9 @@ struct fib_config {
u32 fc_flow;
u32 fc_nlflags;
struct nl_info fc_nlinfo;
- };
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
+};
struct fib_info;
struct rtable;
@@ -89,6 +91,7 @@ struct fib_nh {
struct rtable __rcu * __percpu *nh_pcpu_rth_output;
struct rtable __rcu *nh_rth_input;
struct fnhe_hash_bucket __rcu *nh_exceptions;
+ struct lwtunnel_state *nh_lwtstate;
};
/*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index d8214cb88bbc..984dbfa15e13 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -9,9 +9,9 @@
#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
-#include <net/ip.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -22,6 +22,37 @@
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE \
+ (offsetof(struct ip_tunnel_key, tp_dst) + \
+ FIELD_SIZEOF(struct ip_tunnel_key, tp_dst))
+
+struct ip_tunnel_key {
+ __be64 tun_id;
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+ __be16 tun_flags;
+ __u8 ipv4_tos;
+ __u8 ipv4_ttl;
+ __be16 tp_src;
+ __be16 tp_dst;
+} __packed __aligned(4); /* Minimize padding. */
+
+/* Indicates whether the tunnel info structure represents receive
+ * or transmit tunnel parameters.
+ */
+enum {
+ IP_TUNNEL_INFO_RX,
+ IP_TUNNEL_INFO_TX,
+};
+
+struct ip_tunnel_info {
+ struct ip_tunnel_key key;
+ const void *options;
+ u8 options_len;
+ u8 mode;
+};
+
/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
@@ -51,6 +82,8 @@ struct ip_tunnel_dst {
__be32 saddr;
};
+struct metadata_dst;
+
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
@@ -84,6 +117,7 @@ struct ip_tunnel {
unsigned int prl_count; /* # of entries in PRL */
int ip_tnl_net_id;
struct gro_cells gro_cells;
+ bool collect_md;
};
#define TUNNEL_CSUM __cpu_to_be16(0x01)
@@ -118,6 +152,7 @@ struct tnl_ptk_info {
struct ip_tunnel_net {
struct net_device *fb_tunnel_dev;
struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+ struct ip_tunnel __rcu *collect_md_tun;
};
struct ip_tunnel_encap_ops {
@@ -136,6 +171,47 @@ int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
unsigned int num);
+static inline void __ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
+ __be32 saddr, __be32 daddr,
+ u8 tos, u8 ttl,
+ __be16 tp_src, __be16 tp_dst,
+ __be64 tun_id, __be16 tun_flags,
+ const void *opts, u8 opts_len)
+{
+ tun_info->key.tun_id = tun_id;
+ tun_info->key.ipv4_src = saddr;
+ tun_info->key.ipv4_dst = daddr;
+ tun_info->key.ipv4_tos = tos;
+ tun_info->key.ipv4_ttl = ttl;
+ tun_info->key.tun_flags = tun_flags;
+
+ /* For tunnel types on top of IPsec, the tp_src and tp_dst of
+ * the upper tunnel are used.
+ * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
+ */
+ tun_info->key.tp_src = tp_src;
+ tun_info->key.tp_dst = tp_dst;
+
+ /* Clear struct padding. */
+ if (sizeof(tun_info->key) != IP_TUNNEL_KEY_SIZE)
+ memset((unsigned char *)&tun_info->key + IP_TUNNEL_KEY_SIZE,
+ 0, sizeof(tun_info->key) - IP_TUNNEL_KEY_SIZE);
+
+ tun_info->options = opts;
+ tun_info->options_len = opts_len;
+}
+
+static inline void ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
+ const struct iphdr *iph,
+ __be16 tp_src, __be16 tp_dst,
+ __be64 tun_id, __be16 tun_flags,
+ const void *opts, u8 opts_len)
+{
+ __ip_tunnel_info_init(tun_info, iph->saddr, iph->daddr,
+ iph->tos, iph->ttl, tp_src, tp_dst,
+ tun_id, tun_flags, opts, opts_len);
+}
+
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -163,7 +239,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
__be32 key);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
- const struct tnl_ptk_info *tpi, bool log_ecn_error);
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -221,6 +298,44 @@ static inline void iptunnel_xmit_stats(int err,
}
}
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info, size_t n)
+{
+ return info + 1;
+}
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+extern struct static_key ip_tunnel_metadata_cnt;
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+ return static_key_false(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
#endif /* CONFIG_INET */
#endif /* __NET_IP_TUNNELS_H */
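A user-space sketch of the padding-clearing trick behind IP_TUNNEL_KEY_SIZE and __ip_tunnel_info_init() above: derive the struct's useful size from its last member, then zero any trailing padding so whole-struct memcmp() or hashing is well defined. The struct and names below are illustrative:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct key {
        long long id;
        short     port;         /* last real member; padding may follow */
} __attribute__((aligned(8)));

/* useful payload size: offset of the last member plus its size */
#define KEY_SIZE (offsetof(struct key, port) + sizeof(((struct key *)0)->port))

int main(void)
{
        struct key k;

        k.id = 42;
        k.port = 7;
        if (sizeof(k) != KEY_SIZE)      /* clear trailing padding, if any */
                memset((unsigned char *)&k + KEY_SIZE, 0,
                       sizeof(k) - KEY_SIZE);
        printf("payload=%zu total=%zu\n", KEY_SIZE, sizeof(k));
        return 0;
}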
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 82dbdb092a5d..711cca428cc8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -707,54 +707,69 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct flow_keys keys;
- memset(&keys, 0, sizeof(keys));
+/* Sysctl settings for net ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF 0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
+#define IP6_AUTO_FLOW_LABEL_OPTIN 2
+#define IP6_AUTO_FLOW_LABEL_FORCED 3
- memcpy(&keys.addrs.v6addrs.src, &np->saddr,
- sizeof(keys.addrs.v6addrs.src));
- memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
- sizeof(keys.addrs.v6addrs.dst));
- keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
- keys.ports.src = inet->inet_sport;
- keys.ports.dst = inet->inet_dport;
+#define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED
- sk->sk_txhash = flow_hash_from_keys(&keys);
-}
+#define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
- __be32 flowlabel, bool autolabel)
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
{
- if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
- u32 hash;
+ u32 hash;
+
+ if (flowlabel ||
+ net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+ (!autolabel &&
+ net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+ return flowlabel;
- hash = skb_get_hash(skb);
+ hash = skb_get_hash_flowi6(skb, fl6);
- /* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
- * attacker is leaked. Only lower 20 bits are relevant.
- */
- hash ^= hash >> 12;
+ /* Since this is being sent on the wire, obfuscate the hash a bit
+ * to minimize the possibility that any useful information is
+ * leaked to an attacker. Only the lower 20 bits are relevant.
+ */
+ hash = rol32(hash, 16);
- flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+ flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
- if (net->ipv6.sysctl.flowlabel_state_ranges)
- flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
- }
+ if (net->ipv6.sysctl.flowlabel_state_ranges)
+ flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
return flowlabel;
}
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ switch (net->ipv6.sysctl.auto_flowlabels) {
+ case IP6_AUTO_FLOW_LABEL_OFF:
+ case IP6_AUTO_FLOW_LABEL_OPTIN:
+ default:
+ return 0;
+ case IP6_AUTO_FLOW_LABEL_OPTOUT:
+ case IP6_AUTO_FLOW_LABEL_FORCED:
+ return 1;
+ }
+}
#else
static inline void ip6_set_txhash(struct sock *sk) { }
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
- __be32 flowlabel, bool autolabel)
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
{
return flowlabel;
}
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ return 0;
+}
#endif
@@ -832,7 +847,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
&inet6_sk(sk)->cork);
}
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
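For reference, the masking in ip6_make_flowlabel() keeps only the low 20 bits of the rotated hash. A simplified sketch in host byte order (the kernel mask is a big-endian constant; the values here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define FLOWLABEL_MASK 0x000FFFFFu      /* low 20 bits carry the label */

static uint32_t rol32(uint32_t w, unsigned int s)
{
        return (w << s) | (w >> (32 - s));
}

int main(void)
{
        uint32_t hash = 0xdeadbeef;     /* pretend skb flow hash */

        hash = rol32(hash, 16);         /* mix high bits into the kept range */
        printf("label=0x%05x\n", hash & FLOWLABEL_MASK);
        return 0;
}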
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644
index 000000000000..33bd30963a95
--- /dev/null
+++ b/include/net/lwtunnel.h
@@ -0,0 +1,147 @@
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS 7
+#define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT 0x1
+
+struct lwtunnel_state {
+ __u16 type;
+ __u16 flags;
+ atomic_t refcnt;
+ int len;
+ __u8 data[0];
+};
+
+struct lwtunnel_encap_ops {
+ int (*build_state)(struct net_device *dev, struct nlattr *encap,
+ struct lwtunnel_state **ts);
+ int (*output)(struct sock *sk, struct sk_buff *skb);
+ int (*fill_encap)(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate);
+ int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+ int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+};
+
+#ifdef CONFIG_LWTUNNEL
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ if (lws)
+ atomic_inc(&lws->refcnt);
+
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+ if (!lws)
+ return;
+
+ if (atomic_dec_and_test(&lws->refcnt))
+ kfree(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ struct lwtunnel_state **lws);
+int lwtunnel_fill_encap(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_output6(struct sock *sk, struct sk_buff *skb);
+
+#else
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+ return -EOPNOTSUPP;
+
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ struct lwtunnel_state **lws)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+ return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+ return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+ struct lwtunnel_state *b)
+{
+ return 0;
+}
+
+static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* __NET_LWTUNNEL_H */
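lwtstate_get()/lwtstate_put() above follow the usual atomic refcount shape. A rough C11 user-space equivalent (names illustrative; atomic_fetch_sub() returning 1 corresponds to atomic_dec_and_test() succeeding):

#include <stdatomic.h>
#include <stdlib.h>

struct state {
        atomic_int refcnt;
};

/* analog of lwtstate_get(): take a reference if the object exists */
static struct state *state_get(struct state *s)
{
        if (s)
                atomic_fetch_add(&s->refcnt, 1);
        return s;
}

/* analog of lwtstate_put(): drop a reference, free on the last one */
static void state_put(struct state *s)
{
        if (s && atomic_fetch_sub(&s->refcnt, 1) == 1)
                free(s);
}

int main(void)
{
        struct state *s = calloc(1, sizeof(*s));

        atomic_init(&s->refcnt, 1);     /* creator holds the first reference */
        state_get(s);                   /* second user */
        state_put(s);
        state_put(s);                   /* last put frees */
        return 0;
}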
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index f534a46911dc..b7f99615224b 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -321,23 +321,6 @@ int ieee802154_register_hw(struct ieee802154_hw *hw);
void ieee802154_unregister_hw(struct ieee802154_hw *hw);
/**
- * ieee802154_rx - receive frame
- *
- * Use this function to hand received frames to mac802154. The receive
- * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
- * paged @skb is used, the driver is recommended to put the ieee802154
- * header of the frame on the linear part of the @skb to avoid memory
- * allocation and/or memcpy by the stack.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other.
- *
- * @hw: the hardware this frame came in on
- * @skb: the buffer to receive, owned by mac802154 after this call
- */
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
-
-/**
* ieee802154_rx_irqsafe - receive frame
*
* Like ieee802154_rx() but can be called in IRQ context
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644
index 000000000000..4757997f76ed
--- /dev/null
+++ b/include/net/mpls_iptunnel.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#define MAX_NEW_LABELS 2
+
+struct mpls_iptunnel_encap {
+ u32 label[MAX_NEW_LABELS];
+ u32 labels;
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+ return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
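Each label in mpls_iptunnel_encap ultimately lands in a 32-bit MPLS label stack entry on the wire. A small sketch of that packing per RFC 3032 (label 20 bits, traffic class 3, bottom-of-stack 1, TTL 8); the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t mpls_lse(uint32_t label, uint8_t tc, int bos, uint8_t ttl)
{
        return (label << 12) | ((uint32_t)(tc & 7) << 9) |
               ((uint32_t)(bos & 1) << 8) | ttl;
}

int main(void)
{
        /* two-label stack: only the bottom entry sets the S bit */
        printf("top=0x%08x bottom=0x%08x\n",
               mpls_lse(100, 0, 0, 64), mpls_lse(200, 0, 1, 64));
        return 0;
}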
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index bd33e66f49aa..8b683841e574 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -125,6 +125,7 @@ struct neigh_statistics {
unsigned long forced_gc_runs; /* number of forced GC runs */
unsigned long unres_discards; /* number of unresolved drops */
+ unsigned long table_fulls; /* times even gc couldn't help */
};
#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 8d93544a2d2b..c0368db6df54 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -31,6 +31,7 @@ struct netns_sysctl_ipv6 {
int auto_flowlabels;
int icmpv6_time;
int anycast_src_echo_reply;
+ int ip_nonlocal_bind;
int fwmark_reflect;
int idgen_retries;
int idgen_delay;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 532e4ba64f49..38aa4983e2a9 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -14,5 +14,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
+ struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
};
#endif
diff --git a/include/net/route.h b/include/net/route.h
index fe22d03afb6a..2d45f419477f 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -66,6 +66,7 @@ struct rtable {
struct list_head rt_uncached;
struct uncached_list *rt_uncached_list;
+ struct lwtunnel_state *rt_lwtstate;
};
static inline bool rt_is_input_route(const struct rtable *rt)
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 343d922d15c2..18fdb98185ab 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -141,6 +141,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
struct nlattr *tb[]);
+int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2738f6f87908..2eab08c38e32 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -513,17 +513,20 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
-static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
- const struct sk_buff *skb)
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+ const struct sk_buff *skb)
{
- struct gnet_stats_basic_cpu *bstats =
- this_cpu_ptr(sch->cpu_bstats);
-
u64_stats_update_begin(&bstats->syncp);
bstats_update(&bstats->bstats, skb);
u64_stats_update_end(&bstats->syncp);
}
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
@@ -547,16 +550,24 @@ static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
sch->qstats.drops += count;
}
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
- sch->qstats.drops++;
+ qstats->drops++;
}
-static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
- struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+ qstats->overlimits++;
+}
- qstats->drops++;
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+ qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
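The split separates the raw counter updates (bstats_cpu_update(), qstats_drop_inc(), qstats_overlimit_inc()) from the Qdisc-level wrappers, so code holding a gnet_stats_queue or gnet_stats_basic_cpu pointer directly can reuse them. A hedged sketch of a lockless enqueue path accounting a drop on the per-CPU counters (a classic qdisc would call qdisc_qstats_drop() instead):

    static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
    {
            /* hypothetical drop decision */
            kfree_skb(skb);
            qdisc_qstats_cpu_drop(sch);   /* per-CPU drops++ */
            return NET_XMIT_DROP;
    }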
diff --git a/include/net/sock.h b/include/net/sock.h
index f21f0708ec59..43c6abcf06ab 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -429,7 +429,9 @@ struct sock {
void *sk_security;
#endif
__u32 sk_mark;
+#ifdef CONFIG_CGROUP_NET_CLASSID
u32 sk_classid;
+#endif
struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk);
@@ -1685,6 +1687,20 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
+static inline void sk_set_txhash(struct sock *sk)
+{
+ sk->sk_txhash = prandom_u32();
+
+ if (unlikely(!sk->sk_txhash))
+ sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+ if (sk->sk_txhash)
+ sk_set_txhash(sk);
+}
+
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
@@ -1709,6 +1725,8 @@ static inline void dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+ sk_rethink_txhash(sk);
+
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
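sk_set_txhash() reserves 0 to mean "no hash chosen" by remapping an unlucky prandom_u32() result to 1, so sk_rethink_txhash() only re-rolls sockets that already carry a hash. Hooked into dst_negative_advice(), a connection that keeps failing picks a fresh flow hash and may land on a different ECMP or LAG path. A sketch of the connect-side seeding (illustrative, not a specific upstream call site):

    static int example_connect(struct sock *sk /* , ... */)
    {
            sk_set_txhash(sk);   /* skb_set_hash_from_sk() can then
                                  * propagate it to outgoing skbs */
            /* ... rest of connect ... */
            return 0;
    }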
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d5671f118bfc..319baab3b48e 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -72,6 +72,7 @@ struct switchdev_obj {
struct switchdev_obj_fdb { /* PORT_FDB */
const unsigned char *addr;
u16 vid;
+ u16 ndm_state;
} fdb;
} u;
};
@@ -157,6 +158,9 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int idx);
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining);
#else
@@ -271,6 +275,12 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
return -EOPNOTSUPP;
}
+static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining)
+{
+}
+
#endif
#endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9fc9b578908a..592a6bc02b0b 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -6,9 +6,10 @@
struct tcf_gact {
struct tcf_common common;
#ifdef CONFIG_GACT_PROB
- u16 tcfg_ptype;
- u16 tcfg_pval;
- int tcfg_paction;
+ u16 tcfg_ptype;
+ u16 tcfg_pval;
+ int tcfg_paction;
+ atomic_t packets;
#endif
};
#define to_gact(a) \
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 4dd77a1c106b..dae96bae1c19 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,7 +8,7 @@ struct tcf_mirred {
int tcfm_eaction;
int tcfm_ifindex;
int tcfm_ok_push;
- struct net_device *tcfm_dev;
+ struct net_device __rcu *tcfm_dev;
struct list_head tcfm_list;
};
#define to_mirred(a) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 950cfecaad3c..364426a2be5a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -989,6 +989,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
#define TCP_INFINITE_SSTHRESH 0x7fffffff
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1070,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
return tp->snd_cwnd < 2 * tp->max_packets_out;
return tp->is_cwnd_limited;
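tcp_in_slow_start() simply names the canonical snd_cwnd < snd_ssthresh test so congestion-control modules stop open-coding it; the tcp_is_cwnd_limited() hunk above is the first conversion. Roughly how a Reno-style cong_avoid callback reads with the helper (a sketch, not the exact upstream function):

    static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (!tcp_is_cwnd_limited(sk))
                    return;

            if (tcp_in_slow_start(tp)) {
                    /* exponential growth up to ssthresh */
                    acked = tcp_slow_start(tp, acked);
                    if (!acked)
                            return;
            }
            tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);  /* additive */
    }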
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 68f0ecad6c6e..1a47946f95ba 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -33,9 +33,6 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
static inline void twsk_destructor(struct sock *sk)
{
- BUG_ON(sk == NULL);
- BUG_ON(sk->sk_prot == NULL);
- BUG_ON(sk->sk_prot->twsk_prot == NULL);
if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
sk->sk_prot->twsk_prot->twsk_destructor(sk);
}
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0082b5d33d7d..e4534f1b2d8c 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -7,6 +7,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
+#include <net/dst_metadata.h>
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@ struct vxlanhdr {
#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS 8
+#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
+
struct vxlan_metadata {
- __be32 vni;
u32 gbp;
};
-struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
- struct vxlan_metadata *md);
-
/* per UDP socket information */
struct vxlan_sock {
struct hlist_node hlist;
- vxlan_rcv_t *rcv;
- void *data;
struct work_struct del_work;
struct socket *sock;
struct rcu_head rcu;
@@ -117,6 +116,57 @@ struct vxlan_sock {
u32 flags;
};
+union vxlan_addr {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+ union vxlan_addr remote_ip;
+ __be16 remote_port;
+ u32 remote_vni;
+ u32 remote_ifindex;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+struct vxlan_config {
+ union vxlan_addr remote_ip;
+ union vxlan_addr saddr;
+ u32 vni;
+ int remote_ifindex;
+ int mtu;
+ __be16 dst_port;
+ __u16 port_min;
+ __u16 port_max;
+ __u8 tos;
+ __u8 ttl;
+ u32 flags;
+ unsigned long age_interval;
+ unsigned int addrmax;
+ bool no_share;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+ struct hlist_node hlist; /* vni hash table */
+ struct list_head next; /* vxlan's per namespace list */
+ struct vxlan_sock *vn_sock; /* listening socket */
+ struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
+ struct vxlan_rdst default_dst; /* default destination */
+ u32 flags; /* VXLAN_F_* in vxlan.h */
+
+ struct timer_list age_timer;
+ spinlock_t hash_lock;
+ unsigned int addrcnt;
+
+ struct vxlan_config cfg;
+
+ struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
#define VXLAN_F_LEARN 0x01
#define VXLAN_F_PROXY 0x02
#define VXLAN_F_RSC 0x04
@@ -130,6 +180,7 @@ struct vxlan_sock {
#define VXLAN_F_REMCSUM_RX 0x400
#define VXLAN_F_GBP 0x800
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
+#define VXLAN_F_COLLECT_METADATA 0x2000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -137,18 +188,16 @@ struct vxlan_sock {
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX | \
- VXLAN_F_REMCSUM_NOPARTIAL)
-
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share, u32 flags);
+ VXLAN_F_REMCSUM_NOPARTIAL | \
+ VXLAN_F_COLLECT_METADATA)
-void vxlan_sock_release(struct vxlan_sock *vs);
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type, struct vxlan_config *conf);
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
- bool xnet, u32 vxflags);
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+{
+ return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+}
static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
netdev_features_t features)
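The callback-based vxlan_sock_add()/vxlan_xmit_skb() interface gives way to vxlan_dev_create(), which builds a full pseudo netdevice from a struct vxlan_config. A hedged sketch of a caller; the field values are illustrative and the usual ERR_PTR() error convention is assumed:

    struct vxlan_config conf = {
            .vni      = 42,
            .dst_port = htons(4789),              /* IANA VXLAN port */
            .flags    = VXLAN_F_COLLECT_METADATA, /* external control */
    };
    struct net_device *dev;

    dev = vxlan_dev_create(net, "vxlan42", NET_NAME_USER, &conf);
    if (IS_ERR(dev))
            return PTR_ERR(dev);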
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 1ff9942718fe..aafb9937b162 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -243,6 +243,7 @@ header-y += limits.h
header-y += llc.h
header-y += loop.h
header-y += lp.h
+header-y += lwtunnel.h
header-y += magic.h
header-y += major.h
header-y += map_to_7segment.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 29ef6f99e43d..92a48e2d5461 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -114,6 +114,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_ARRAY,
BPF_MAP_TYPE_PROG_ARRAY,
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};
enum bpf_prog_type {
@@ -249,6 +250,28 @@ enum bpf_func_id {
* Return: 0 on success
*/
BPF_FUNC_get_current_comm,
+
+ /**
+ * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+ * @skb: pointer to skb
+ * Return: classid if != 0
+ */
+ BPF_FUNC_get_cgroup_classid,
+ BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+ BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */
+
+ /**
+ * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+ * retrieve or populate tunnel metadata
+ * @skb: pointer to skb
+ * @key: pointer to 'struct bpf_tunnel_key'
+ * @size: size of 'struct bpf_tunnel_key'
+ * @flags: room for future extensions
+ * Return: 0 on success
+ */
+ BPF_FUNC_skb_get_tunnel_key,
+ BPF_FUNC_skb_set_tunnel_key,
+ BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */
__BPF_FUNC_MAX_ID,
};
@@ -269,6 +292,12 @@ struct __sk_buff {
__u32 ifindex;
__u32 tc_index;
__u32 cb[5];
+ __u32 hash;
+};
+
+struct bpf_tunnel_key {
+ __u32 tunnel_id;
+ __u32 remote_ipv4;
};
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cd67aec187d9..cd1629170103 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1093,6 +1093,11 @@ struct ethtool_sfeatures {
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
* respectively. For example, if the device supports HWTSTAMP_TX_ON,
* then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If a SIOCSHWTSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC is satisfied by upscaling to
+ * HWTSTAMP_FILTER_V1_EVENT, the driver should still report only
+ * HWTSTAMP_FILTER_V1_EVENT in this op.
*/
struct ethtool_ts_info {
__u32 cmd;
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 2b82d7e30974..96161b8202b5 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -43,7 +43,7 @@ enum {
FRA_UNUSED5,
FRA_FWMARK, /* mark */
FRA_FLOW, /* flow/class id */
- FRA_UNUSED6,
+ FRA_TUN_ID,
FRA_SUPPRESS_IFGROUP,
FRA_SUPPRESS_PREFIXLEN,
FRA_TABLE, /* Extended table id */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index eaaea6208b42..3635b7797508 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -182,6 +182,7 @@ struct br_mdb_entry {
#define MDB_TEMPORARY 0
#define MDB_PERMANENT 1
__u8 state;
+ __u16 vid;
struct {
union {
__be32 ip4;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 2c7e8e3d3981..d450be36add2 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -148,6 +148,7 @@ enum {
IFLA_PHYS_SWITCH_ID,
IFLA_LINK_NETNSID,
IFLA_PHYS_PORT_NAME,
+ IFLA_PROTO_DOWN,
__IFLA_MAX
};
@@ -229,6 +230,7 @@ enum {
IFLA_BR_AGEING_TIME,
IFLA_BR_STP_STATE,
IFLA_BR_PRIORITY,
+ IFLA_BR_VLAN_FILTERING,
__IFLA_BR_MAX,
};
@@ -381,6 +383,7 @@ enum {
IFLA_VXLAN_REMCSUM_RX,
IFLA_VXLAN_GBP,
IFLA_VXLAN_REMCSUM_NOPARTIAL,
+ IFLA_VXLAN_COLLECT_METADATA,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -431,6 +434,7 @@ enum {
IFLA_BOND_AD_ACTOR_SYS_PRIO,
IFLA_BOND_AD_USER_PORT_KEY,
IFLA_BOND_AD_ACTOR_SYSTEM,
+ IFLA_BOND_TLB_DYNAMIC_LB,
__IFLA_BOND_MAX,
};
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index bd3cc11a431f..af4de90ba27d 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -112,6 +112,7 @@ enum {
IFLA_GRE_ENCAP_FLAGS,
IFLA_GRE_ENCAP_SPORT,
IFLA_GRE_ENCAP_DPORT,
+ IFLA_GRE_COLLECT_METADATA,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 5efa54ae567c..80f3b74446a1 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -171,6 +171,8 @@ enum {
DEVCONF_USE_OPTIMISTIC,
DEVCONF_ACCEPT_RA_MTU,
DEVCONF_STABLE_SECRET,
+ DEVCONF_USE_OIF_ADDRS_ONLY,
+ DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644
index 000000000000..31377bbea3f8
--- /dev/null
+++ b/include/uapi/linux/lwtunnel.h
@@ -0,0 +1,16 @@
+#ifndef _UAPI_LWTUNNEL_H_
+#define _UAPI_LWTUNNEL_H_
+
+#include <linux/types.h>
+
+enum lwtunnel_encap_types {
+ LWTUNNEL_ENCAP_NONE,
+ LWTUNNEL_ENCAP_MPLS,
+ LWTUNNEL_ENCAP_IP,
+ __LWTUNNEL_ENCAP_MAX,
+};
+
+#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+
+
+#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index 139d4dd1cab8..24a6cb1aec86 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -41,4 +41,6 @@ struct mpls_label {
#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
+#define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */
+
#endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..d80a0498f77e
--- /dev/null
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -0,0 +1,28 @@
+/*
+ * mpls tunnel api
+ *
+ * Authors:
+ * Roopa Prabhu <roopa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
+#define _UAPI_LINUX_MPLS_IPTUNNEL_H
+
+/* MPLS tunnel attributes
+ * [RTA_ENCAP] = {
+ * [MPLS_IPTUNNEL_DST]
+ * }
+ */
+enum {
+ MPLS_IPTUNNEL_UNSPEC,
+ MPLS_IPTUNNEL_DST,
+ __MPLS_IPTUNNEL_MAX,
+};
+#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 2e35c61bbdd1..788655bfa0f3 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -106,6 +106,7 @@ struct ndt_stats {
__u64 ndts_rcv_probes_ucast;
__u64 ndts_periodic_gc_runs;
__u64 ndts_forced_gc_runs;
+ __u64 ndts_table_fulls;
};
enum {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index ceeefe6681b5..ed4e776e1242 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -13,6 +13,8 @@ enum sctp_conntrack {
SCTP_CONNTRACK_SHUTDOWN_SENT,
SCTP_CONNTRACK_SHUTDOWN_RECD,
SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_ACKED,
SCTP_CONNTRACK_MAX
};
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 1ab0b97b3a1e..f2c10dc140d6 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -92,6 +92,8 @@ enum ctattr_timeout_sctp {
CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
__CTA_TIMEOUT_SCTP_MAX
};
#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1dab77601c21..d6b885460187 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -321,7 +321,7 @@ enum ovs_key_attr {
* the accepted length of the array. */
#ifdef __KERNEL__
- OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
+ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
#endif
__OVS_KEY_ATTR_MAX
};
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fdd8f07f1d34..47d24cb3fbc1 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -286,6 +286,21 @@ enum rt_class_t {
/* Routing message attributes */
+enum ip_tunnel_t {
+ IP_TUN_UNSPEC,
+ IP_TUN_ID,
+ IP_TUN_DST,
+ IP_TUN_SRC,
+ IP_TUN_TTL,
+ IP_TUN_TOS,
+ IP_TUN_SPORT,
+ IP_TUN_DPORT,
+ IP_TUN_FLAGS,
+ __IP_TUN_MAX,
+};
+
+#define IP_TUN_MAX (__IP_TUN_MAX - 1)
+
enum rtattr_type_t {
RTA_UNSPEC,
RTA_DST,
@@ -308,6 +323,8 @@ enum rtattr_type_t {
RTA_VIA,
RTA_NEWDST,
RTA_PREF,
+ RTA_ENCAP_TYPE,
+ RTA_ENCAP,
__RTA_MAX
};
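RTA_ENCAP_TYPE selects one of the lwtunnel_encap_types defined earlier, while RTA_ENCAP nests the type-specific attributes (MPLS_IPTUNNEL_DST for MPLS, the IP_TUN_* set for IP). A hedged sketch of how a dump routine might emit the pair with the standard nla helpers; put_mpls_labels() is a hypothetical stand-in for writing the label stack:

    struct nlattr *nest;

    if (nla_put_u16(skb, RTA_ENCAP_TYPE, LWTUNNEL_ENCAP_MPLS))
            goto nla_put_failure;

    nest = nla_nest_start(skb, RTA_ENCAP);
    if (!nest)
            goto nla_put_failure;
    if (put_mpls_labels(skb, MPLS_IPTUNNEL_DST))   /* hypothetical */
            goto nla_put_failure;
    nla_nest_end(skb, nest);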
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index eee8968407f0..25a9ad8bcef1 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,8 @@ enum
LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
+ LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
+ LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
__LINUX_MIB_MAX
};
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index cb31229a6fa4..29ace107f236 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -150,15 +150,15 @@ static int __init register_array_map(void)
}
late_initcall(register_array_map);
-static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
- /* only bpf_prog file descriptors can be stored in prog_array map */
+ /* only file descriptors can be stored in this type of map */
if (attr->value_size != sizeof(u32))
return ERR_PTR(-EINVAL);
return array_map_alloc(attr);
}
-static void prog_array_map_free(struct bpf_map *map)
+static void fd_array_map_free(struct bpf_map *map)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
int i;
@@ -167,21 +167,21 @@ static void prog_array_map_free(struct bpf_map *map)
/* make sure it's empty */
for (i = 0; i < array->map.max_entries; i++)
- BUG_ON(array->prog[i] != NULL);
+ BUG_ON(array->ptrs[i] != NULL);
kvfree(array);
}
-static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
return NULL;
}
/* only called from syscall */
-static int prog_array_map_update_elem(struct bpf_map *map, void *key,
- void *value, u64 map_flags)
+static int fd_array_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
- struct bpf_prog *prog, *old_prog;
+ void *new_ptr, *old_ptr;
u32 index = *(u32 *)key, ufd;
if (map_flags != BPF_ANY)
@@ -191,57 +191,75 @@ static int prog_array_map_update_elem(struct bpf_map *map, void *key,
return -E2BIG;
ufd = *(u32 *)value;
- prog = bpf_prog_get(ufd);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
-
- if (!bpf_prog_array_compatible(array, prog)) {
- bpf_prog_put(prog);
- return -EINVAL;
- }
+ new_ptr = map->ops->map_fd_get_ptr(map, ufd);
+ if (IS_ERR(new_ptr))
+ return PTR_ERR(new_ptr);
- old_prog = xchg(array->prog + index, prog);
- if (old_prog)
- bpf_prog_put_rcu(old_prog);
+ old_ptr = xchg(array->ptrs + index, new_ptr);
+ if (old_ptr)
+ map->ops->map_fd_put_ptr(old_ptr);
return 0;
}
-static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
- struct bpf_prog *old_prog;
+ void *old_ptr;
u32 index = *(u32 *)key;
if (index >= array->map.max_entries)
return -E2BIG;
- old_prog = xchg(array->prog + index, NULL);
- if (old_prog) {
- bpf_prog_put_rcu(old_prog);
+ old_ptr = xchg(array->ptrs + index, NULL);
+ if (old_ptr) {
+ map->ops->map_fd_put_ptr(old_ptr);
return 0;
} else {
return -ENOENT;
}
}
+static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct bpf_prog *prog = bpf_prog_get(fd);
+ if (IS_ERR(prog))
+ return prog;
+
+ if (!bpf_prog_array_compatible(array, prog)) {
+ bpf_prog_put(prog);
+ return ERR_PTR(-EINVAL);
+ }
+ return prog;
+}
+
+static void prog_fd_array_put_ptr(void *ptr)
+{
+ struct bpf_prog *prog = ptr;
+
+ bpf_prog_put_rcu(prog);
+}
+
/* decrement refcnt of all bpf_progs that are stored in this map */
-void bpf_prog_array_map_clear(struct bpf_map *map)
+void bpf_fd_array_map_clear(struct bpf_map *map)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
int i;
for (i = 0; i < array->map.max_entries; i++)
- prog_array_map_delete_elem(map, &i);
+ fd_array_map_delete_elem(map, &i);
}
static const struct bpf_map_ops prog_array_ops = {
- .map_alloc = prog_array_map_alloc,
- .map_free = prog_array_map_free,
+ .map_alloc = fd_array_map_alloc,
+ .map_free = fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
- .map_lookup_elem = prog_array_map_lookup_elem,
- .map_update_elem = prog_array_map_update_elem,
- .map_delete_elem = prog_array_map_delete_elem,
+ .map_lookup_elem = fd_array_map_lookup_elem,
+ .map_update_elem = fd_array_map_update_elem,
+ .map_delete_elem = fd_array_map_delete_elem,
+ .map_fd_get_ptr = prog_fd_array_get_ptr,
+ .map_fd_put_ptr = prog_fd_array_put_ptr,
};
static struct bpf_map_type_list prog_array_type __read_mostly = {
@@ -255,3 +273,60 @@ static int __init register_prog_array_map(void)
return 0;
}
late_initcall(register_prog_array_map);
+
+static void perf_event_array_map_free(struct bpf_map *map)
+{
+ bpf_fd_array_map_clear(map);
+ fd_array_map_free(map);
+}
+
+static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+ struct perf_event *event;
+ const struct perf_event_attr *attr;
+
+ event = perf_event_get(fd);
+ if (IS_ERR(event))
+ return event;
+
+ attr = perf_event_attrs(event);
+ if (IS_ERR(attr))
+ return (void *)attr;
+
+ if (attr->type != PERF_TYPE_RAW &&
+ attr->type != PERF_TYPE_HARDWARE) {
+ perf_event_release_kernel(event);
+ return ERR_PTR(-EINVAL);
+ }
+ return event;
+}
+
+static void perf_event_fd_array_put_ptr(void *ptr)
+{
+ struct perf_event *event = ptr;
+
+ perf_event_release_kernel(event);
+}
+
+static const struct bpf_map_ops perf_event_array_ops = {
+ .map_alloc = fd_array_map_alloc,
+ .map_free = perf_event_array_map_free,
+ .map_get_next_key = array_map_get_next_key,
+ .map_lookup_elem = fd_array_map_lookup_elem,
+ .map_update_elem = fd_array_map_update_elem,
+ .map_delete_elem = fd_array_map_delete_elem,
+ .map_fd_get_ptr = perf_event_fd_array_get_ptr,
+ .map_fd_put_ptr = perf_event_fd_array_put_ptr,
+};
+
+static struct bpf_map_type_list perf_event_array_type __read_mostly = {
+ .ops = &perf_event_array_ops,
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+};
+
+static int __init register_perf_event_array_map(void)
+{
+ bpf_register_map_type(&perf_event_array_type);
+ return 0;
+}
+late_initcall(register_perf_event_array_map);
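From user space, a BPF_MAP_TYPE_PERF_EVENT_ARRAY is created like any other array map: key and value are both 4 bytes, the value being a perf event fd that the update path resolves through map_fd_get_ptr(). A hedged sketch using the raw syscall, with no wrapper library assumed:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int create_perf_event_array(unsigned int max_entries)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_type    = BPF_MAP_TYPE_PERF_EVENT_ARRAY;
            attr.key_size    = sizeof(int);  /* array index */
            attr.value_size  = sizeof(int);  /* perf event fd */
            attr.max_entries = max_entries;

            return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }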
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c5bedc82bc1c..67c380cfa9ca 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -177,6 +177,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
return 0;
}
+EXPORT_SYMBOL_GPL(__bpf_call_base);
/**
* __bpf_prog_run - run eBPF program on a given context
@@ -449,11 +450,15 @@ select_insn:
tail_call_cnt++;
- prog = READ_ONCE(array->prog[index]);
+ prog = READ_ONCE(array->ptrs[index]);
if (unlikely(!prog))
goto out;
- ARG1 = BPF_R1;
+ /* ARG1 at this point is guaranteed to point to CTX from
+ * the verifier side, because the tail call is handled like
+ * a helper, that is, bpf_tail_call_proto, where arg1_type
+ * is ARG_PTR_TO_CTX.
+ */
insn = prog->insnsi;
goto select_insn;
out:
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a1b14d197a4f..dc9b464fefa9 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -72,7 +72,7 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
/* prog_array stores refcnt-ed bpf_prog pointers
* release them all when user space closes prog_array_fd
*/
- bpf_prog_array_map_clear(map);
+ bpf_fd_array_map_clear(map);
bpf_map_put(map);
return 0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 039d866fd36a..ed12e385fb75 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -238,6 +238,14 @@ static const char * const reg_type_str[] = {
[CONST_IMM] = "imm",
};
+static const struct {
+ int map_type;
+ int func_id;
+} func_limit[] = {
+ {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
+ {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+};
+
static void print_verifier_state(struct verifier_env *env)
{
enum bpf_reg_type t;
@@ -648,6 +656,9 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
struct verifier_state *state = &env->cur_state;
int size, err = 0;
+ if (state->regs[regno].type == PTR_TO_STACK)
+ off += state->regs[regno].imm;
+
size = bpf_size_to_bytes(bpf_size);
if (size < 0)
return size;
@@ -667,7 +678,8 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown_value(state->regs, value_regno);
- } else if (state->regs[regno].type == FRAME_PTR) {
+ } else if (state->regs[regno].type == FRAME_PTR ||
+ state->regs[regno].type == PTR_TO_STACK) {
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose("invalid stack off=%d size=%d\n", off, size);
return -EACCES;
@@ -833,6 +845,28 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
return err;
}
+static int check_map_func_compatibility(struct bpf_map *map, int func_id)
+{
+ bool bool_map, bool_func;
+ int i;
+
+ if (!map)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
+ bool_map = (map->map_type == func_limit[i].map_type);
+ bool_func = (func_id == func_limit[i].func_id);
+ /* A map/helper pair may only proceed when both sides match
+ * or neither does: don't allow any other map type to be
+ * passed into a restricted helper, nor a restricted map
+ * into any other helper.
+ */
+ if (bool_map != bool_func)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int check_call(struct verifier_env *env, int func_id)
{
struct verifier_state *state = &env->cur_state;
@@ -908,21 +942,9 @@ static int check_call(struct verifier_env *env, int func_id)
return -EINVAL;
}
- if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
- func_id != BPF_FUNC_tail_call)
- /* prog_array map type needs extra care:
- * only allow to pass it into bpf_tail_call() for now.
- * bpf_map_delete_elem() can be allowed in the future,
- * while bpf_map_update_elem() must only be done via syscall
- */
- return -EINVAL;
-
- if (func_id == BPF_FUNC_tail_call &&
- map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
- /* don't allow any other map type to be passed into
- * bpf_tail_call()
- */
- return -EINVAL;
+ err = check_map_func_compatibility(map, func_id);
+ if (err)
+ return err;
return 0;
}
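check_map_func_compatibility() enforces a one-to-one pairing per func_limit entry: the call is legal only when map type and helper both match the entry or neither does. Spelled out as a truth table (illustrative comment, not code from the patch):

    /* per func_limit entry:
     *   map matches   func matches   verdict
     *       yes            yes       allowed  (intended pairing)
     *       no             no        allowed  (entry not involved)
     *       yes            no        rejected (-EINVAL)
     *       no             yes       rejected (-EINVAL)
     */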
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae3419b99..e2c6a8886d4d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3212,6 +3212,59 @@ static inline u64 perf_event_count(struct perf_event *event)
return __perf_event_count(event);
}
+/*
+ * NMI-safe method to read a local event, that is an event that
+ * is:
+ * - either for the current task, or for this CPU
+ * - does not have inherit set; inherited task events will
+ * not be local and cannot be read atomically
+ * - must not have a pmu::count method
+ */
+u64 perf_event_read_local(struct perf_event *event)
+{
+ unsigned long flags;
+ u64 val;
+
+ /*
+ * Disabling interrupts avoids all counter scheduling (context
+ * switches, timer based rotation and IPIs).
+ */
+ local_irq_save(flags);
+
+ /* If this is a per-task event, it must be for current */
+ WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
+ event->hw.target != current);
+
+ /* If this is a per-CPU event, it must be for this CPU */
+ WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
+ event->cpu != smp_processor_id());
+
+ /*
+ * It must not be an event with inherit set, we cannot read
+ * all child counters from atomic context.
+ */
+ WARN_ON_ONCE(event->attr.inherit);
+
+ /*
+ * It must not have a pmu::count method, those are not
+ * NMI safe.
+ */
+ WARN_ON_ONCE(event->pmu->count);
+
+ /*
+ * If the event is currently on this CPU, it's either a per-task event,
+ * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
+ * oncpu == -1).
+ */
+ if (event->oncpu == smp_processor_id())
+ event->pmu->read(event);
+
+ val = local64_read(&event->count);
+ local_irq_restore(flags);
+
+ return val;
+}
+
static u64 perf_event_read(struct perf_event *event)
{
/*
@@ -8574,6 +8627,31 @@ void perf_event_delayed_put(struct task_struct *task)
WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
+struct perf_event *perf_event_get(unsigned int fd)
+{
+ int err;
+ struct fd f;
+ struct perf_event *event;
+
+ err = perf_fget_light(fd, &f);
+ if (err)
+ return ERR_PTR(err);
+
+ event = f.file->private_data;
+ atomic_long_inc(&event->refcount);
+ fdput(f);
+
+ return event;
+}
+
+const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+ if (!event)
+ return ERR_PTR(-EINVAL);
+
+ return &event->attr;
+}
+
/*
* inherit a event from parent task to child task:
*/
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 88a041adee90..ef9936df1b04 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -158,6 +158,35 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
return &bpf_trace_printk_proto;
}
+static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
+{
+ struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct perf_event *event;
+
+ if (unlikely(index >= array->map.max_entries))
+ return -E2BIG;
+
+ event = (struct perf_event *)array->ptrs[index];
+ if (!event)
+ return -ENOENT;
+
+ /*
+ * The return value alone does not tell whether the read ran
+ * successfully; that has to be judged elsewhere, such as in
+ * the eBPF program itself.
+ */
+ return perf_event_read_local(event);
+}
+
+const struct bpf_func_proto bpf_perf_event_read_proto = {
+ .func = bpf_perf_event_read,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -183,6 +212,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
return bpf_get_trace_printk_proto();
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
+ case BPF_FUNC_perf_event_read:
+ return &bpf_perf_event_read_proto;
default:
return NULL;
}
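Combined with the new map type, a kprobe program can now read a hardware counter inline. A hedged sketch in the samples/bpf style; SEC(), struct bpf_map_def and the helper stubs are assumed to come from the sample headers, and the attach point is illustrative:

    struct bpf_map_def SEC("maps") counters = {
            .type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
            .key_size    = sizeof(int),
            .value_size  = sizeof(u32),
            .max_entries = 64,   /* one slot per possible CPU */
    };

    SEC("kprobe/sys_write")
    int count_events(struct pt_regs *ctx)
    {
            u32 key = bpf_get_smp_processor_id();
            u64 count;

            /* slot must hold a perf fd installed by user space */
            count = bpf_perf_event_read(&counters, key);
            /* ... consume or report the value ... */
            return 0;
    }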
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7f58c735d745..d1377390b3ad 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -18,10 +18,12 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/filter.h>
+#include <linux/bpf.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
+#include <linux/highmem.h>
/* General test specific settings */
#define MAX_SUBTESTS 3
@@ -55,6 +57,7 @@
/* Flags that can be passed to test cases */
#define FLAG_NO_DATA BIT(0)
#define FLAG_EXPECTED_FAIL BIT(1)
+#define FLAG_SKB_FRAG BIT(2)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -80,6 +83,7 @@ struct bpf_test {
__u32 result;
} test[MAX_SUBTESTS];
int (*fill_helper)(struct bpf_test *self);
+ __u8 frag_data[MAX_DATA];
};
/* Large test cases need separate allocation and fill handler. */
@@ -355,6 +359,81 @@ static int bpf_fill_ja(struct bpf_test *self)
return __bpf_fill_ja(self, 12, 9);
}
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 1; i += 2) {
+ insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+ insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU);
+ }
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+#define PUSH_CNT 68
+/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i = 0, j, k = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_MOV64_REG(R6, R1);
+loop:
+ for (j = 0; j < PUSH_CNT; j++) {
+ insn[i++] = BPF_LD_ABS(BPF_B, 0);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+ i++;
+ insn[i++] = BPF_MOV64_REG(R1, R6);
+ insn[i++] = BPF_MOV64_IMM(R2, 1);
+ insn[i++] = BPF_MOV64_IMM(R3, 2);
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ bpf_skb_vlan_push_proto.func - __bpf_call_base);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+ i++;
+ }
+
+ for (j = 0; j < PUSH_CNT; j++) {
+ insn[i++] = BPF_LD_ABS(BPF_B, 0);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+ i++;
+ insn[i++] = BPF_MOV64_REG(R1, R6);
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ bpf_skb_vlan_pop_proto.func - __bpf_call_base);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+ i++;
+ }
+ if (++k < 5)
+ goto loop;
+
+ for (; i < len - 1; i++)
+ insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+
+ insn[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -3674,6 +3753,9 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -3708,6 +3790,9 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4392,6 +4477,618 @@ static struct bpf_test tests[] = {
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
},
+ {
+ "BPF_MAXINSNS: ld_abs+get_processor_id",
+ { },
+ CLASSIC,
+ { },
+ { { 1, 0xbee } },
+ .fill_helper = bpf_fill_ld_abs_get_processor_id,
+ },
+ {
+ "BPF_MAXINSNS: ld_abs+vlan_push/pop",
+ { },
+ INTERNAL,
+ { 0x34 },
+ { { 1, 0xbef } },
+ .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+ },
+ /*
+ * LD_IND / LD_ABS on fragmented SKBs
+ */
+ {
+ "LD_IND byte frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x42} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND halfword frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x4344} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND word frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x21071983} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND halfword mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x0519} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_IND word mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x25051982} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_ABS byte frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x42} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS halfword frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x4344} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS word frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x21071983} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS halfword mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x0519} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_ABS word mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x25051982} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ /*
+ * LD_IND / LD_ABS on non fragmented SKBs
+ */
+ {
+ /*
+ * this tests that the JIT/interpreter correctly resets X
+ * before using it in an LD_IND instruction.
+ */
+ "LD_IND byte default X",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x1] = 0x42 },
+ { {0x40, 0x42 } },
+ },
+ {
+ "LD_IND byte positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 } },
+ },
+ {
+ "LD_IND byte negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x05 } },
+ },
+ {
+ "LD_IND halfword positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0xdd88 } },
+ },
+ {
+ "LD_IND halfword negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0xbb66 } },
+ },
+ {
+ "LD_IND halfword unaligned",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0x66cc } },
+ },
+ {
+ "LD_IND word positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xee99ffaa } },
+ },
+ {
+ "LD_IND word negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xaa55bb66 } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 2)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xbb66cc77 } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 1)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x55bb66cc } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 3)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x66cc77dd } },
+ },
+ {
+ "LD_ABS byte",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xcc } },
+ },
+ {
+ "LD_ABS halfword",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xdd88 } },
+ },
+ {
+ "LD_ABS halfword unaligned",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x99ff } },
+ },
+ {
+ "LD_ABS word",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xaa55bb66 } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 2)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xdd88ee99 } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 1)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x77dd88ee } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 3)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x88ee99ff } },
+ },
+ /*
+ * verify that the interpreter or JIT correctly sets A and X
+ * to 0.
+ */
+ {
+ "ADD default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+ * A = A + X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "ADD default A",
+ .u.insns = {
+ /*
+ * A = A + 0x42
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "SUB default X",
+ .u.insns = {
+ /*
+ * A = 0x66
+ * A = A - X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x66),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x66 } },
+ },
+ {
+ "SUB default A",
+ .u.insns = {
+ /*
+ * A = A - -0x66
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x66 } },
+ },
+ {
+ "MUL default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+ * A = A * X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "MUL default A",
+ .u.insns = {
+ /*
+ * A = A * 0x66
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "DIV default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+ * A = A / X ; this halts the filter execution if X is 0
+ * ret 0x42
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "DIV default A",
+ .u.insns = {
+ /*
+ * A = A / 1
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "JMP EQ default A",
+ .u.insns = {
+ /*
+ * cmp A, 0x0, 0, 1
+ * ret 0x42
+ * ret 0x66
+ */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_K, 0x66),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "JMP EQ default X",
+ .u.insns = {
+ /*
+ * A = 0x0
+ * cmp A, X, 0, 1
+ * ret 0x42
+ * ret 0x66
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_K, 0x66),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
};
static struct net_device dev;
@@ -4427,6 +5124,9 @@ static struct sk_buff *populate_skb(char *buf, int size)
static void *generate_test_data(struct bpf_test *test, int sub)
{
+ struct sk_buff *skb;
+ struct page *page;
+
if (test->aux & FLAG_NO_DATA)
return NULL;
@@ -4434,7 +5134,38 @@ static void *generate_test_data(struct bpf_test *test, int sub)
* subtests generate skbs of different sizes based on
* the same data.
*/
- return populate_skb(test->data, test->test[sub].data_size);
+ skb = populate_skb(test->data, test->test[sub].data_size);
+ if (!skb)
+ return NULL;
+
+ if (test->aux & FLAG_SKB_FRAG) {
+ /*
+ * when the test requires a fragmented skb, add a
+ * single fragment to the skb, filled with
+ * test->frag_data.
+ */
+ void *ptr;
+
+ page = alloc_page(GFP_KERNEL);
+
+ if (!page)
+ goto err_kfree_skb;
+
+ ptr = kmap(page);
+ if (!ptr)
+ goto err_free_page;
+ memcpy(ptr, test->frag_data, MAX_DATA);
+ kunmap(page);
+ skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
+ }
+
+ return skb;
+
+err_free_page:
+ __free_page(page);
+err_kfree_skb:
+ kfree_skb(skb);
+ return NULL;
}
static void release_test_data(const struct bpf_test *test, void *data)
@@ -4515,6 +5246,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
}
fp->len = flen;
+ /* Type doesn't really matter here as long as it's not unspec. */
+ fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
bpf_prog_select_runtime(fp);
@@ -4545,14 +5278,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
- start = ktime_to_us(ktime_get());
+ start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
- finish = ktime_to_us(ktime_get());
+ finish = ktime_get_ns();
- *duration = (finish - start) * 1000ULL;
+ *duration = finish - start;
do_div(*duration, runs);
return ret;
@@ -4572,6 +5305,11 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
break;
data = generate_test_data(test, i);
+ if (!data && !(test->aux & FLAG_NO_DATA)) {
+ pr_cont("data generation failed ");
+ err_cnt++;
+ break;
+ }
ret = __run_one(fp, data, runs, &duration);
release_test_data(test, data);
@@ -4587,10 +5325,73 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
return err_cnt;
}
+static char test_name[64];
+module_param_string(test_name, test_name, sizeof(test_name), 0);
+
+static int test_id = -1;
+module_param(test_id, int, 0);
+
+static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
+module_param_array(test_range, int, NULL, 0);
+
+static __init int find_test_index(const char *test_name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!strcmp(tests[i].descr, test_name))
+ return i;
+ }
+ return -1;
+}
+
static __init int prepare_bpf_tests(void)
{
int i;
+ if (test_id >= 0) {
+ /*
+ * if a test_id was specified, use test_range to
+ * cover only that test.
+ */
+ if (test_id >= ARRAY_SIZE(tests)) {
+ pr_err("test_bpf: invalid test_id specified.\n");
+ return -EINVAL;
+ }
+
+ test_range[0] = test_id;
+ test_range[1] = test_id;
+ } else if (*test_name) {
+ /*
+ * if a test_name was specified, find it and setup
+ * test_range to cover only that test.
+ */
+ int idx = find_test_index(test_name);
+
+ if (idx < 0) {
+ pr_err("test_bpf: no test named '%s' found.\n",
+ test_name);
+ return -EINVAL;
+ }
+ test_range[0] = idx;
+ test_range[1] = idx;
+ } else {
+ /*
+ * check that the supplied test_range is valid.
+ */
+ if (test_range[0] >= ARRAY_SIZE(tests) ||
+ test_range[1] >= ARRAY_SIZE(tests) ||
+ test_range[0] < 0 || test_range[1] < 0) {
+ pr_err("test_bpf: test_range is out of bound.\n");
+ return -EINVAL;
+ }
+
+ if (test_range[1] < test_range[0]) {
+ pr_err("test_bpf: test_range is ending before it starts.\n");
+ return -EINVAL;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (tests[i].fill_helper &&
tests[i].fill_helper(&tests[i]) < 0)
@@ -4610,6 +5411,11 @@ static __init void destroy_bpf_tests(void)
}
}
+static bool exclude_test(int test_id)
+{
+ return test_id < test_range[0] || test_id > test_range[1];
+}
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
@@ -4619,6 +5425,9 @@ static __init int test_bpf(void)
struct bpf_prog *fp;
int err;
+ if (exclude_test(i))
+ continue;
+
pr_info("#%d %s ", i, tests[i].descr);
fp = generate_filter(i, &err);
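With the new parameters a single case can be run in isolation; exclude_test() then skips everything outside the computed range. Illustrative invocations, per the parameter definitions above:

    /* illustrative, as shell commands:
     *   modprobe test_bpf test_id=25
     *   modprobe test_bpf test_name="LD_ABS byte frag"
     *   modprobe test_bpf test_range=100,150
     */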
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c90777eae1f8..9af7cefb195d 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -20,6 +20,7 @@
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
@@ -87,6 +88,8 @@ static int __init test_rht_lookup(struct rhashtable *ht)
return -EINVAL;
}
}
+
+ cond_resched_rcu();
}
return 0;
@@ -160,6 +163,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
} else if (err) {
return err;
}
+
+ cond_resched();
}
if (insert_fails)
@@ -183,6 +188,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
rhashtable_remove_fast(ht, &obj->node, test_rht_params);
}
+
+ cond_resched();
}
end = ktime_get_ns();
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 94a375c04f21..9055d7b9d112 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -613,6 +613,8 @@ EXPORT_SYMBOL_GPL(lowpan_header_compress);
static int __init lowpan_module_init(void)
{
+ request_module_nowait("ipv6");
+
request_module_nowait("nhc_dest");
request_module_nowait("nhc_fragment");
request_module_nowait("nhc_hop");
diff --git a/net/Kconfig b/net/Kconfig
index 57a7c5af3175..7021c1bf44d6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -374,6 +374,13 @@ source "net/caif/Kconfig"
source "net/ceph/Kconfig"
source "net/nfc/Kconfig"
+config LWTUNNEL
+ bool "Network light weight tunnels"
+ ---help---
+ This feature provides an infrastructure to support light weight
+ tunnels like mpls. There is no netdevice associated with a light
+ weight tunnel endpoint. Tunnel encapsulation parameters are stored
+ with light weight tunnel state associated with fib routes.
endif # if NET
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index cc78538d163b..aa0047c5c467 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -802,13 +802,10 @@ static int br2684_seq_show(struct seq_file *seq, void *v)
(brdev->payload == p_bridged) ? "bridged" : "routed",
brvcc->copies_failed, brvcc->copies_needed);
#ifdef CONFIG_ATM_BR2684_IPFILTER
-#define b1(var, byte) ((u8 *) &brvcc->filter.var)[byte]
-#define bs(var) b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
if (brvcc->filter.netmask != 0)
- seq_printf(seq, " filter=%d.%d.%d.%d/"
- "%d.%d.%d.%d\n", bs(prefix), bs(netmask));
-#undef bs
-#undef b1
+ seq_printf(seq, " filter=%pI4/%pI4\n",
+ &brvcc->filter.prefix,
+ &brvcc->filter.netmask);
#endif /* CONFIG_ATM_BR2684_IPFILTER */
}
return 0;
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 2fb7b3064904..0ffe2e24020a 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -859,9 +859,22 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
SET_NETDEV_DEVTYPE(netdev, &bt_type);
+ *dev = netdev_priv(netdev);
+ (*dev)->netdev = netdev;
+ (*dev)->hdev = chan->conn->hcon->hdev;
+ INIT_LIST_HEAD(&(*dev)->peers);
+
+ spin_lock(&devices_lock);
+ INIT_LIST_HEAD(&(*dev)->list);
+ list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
+ spin_unlock(&devices_lock);
+
err = register_netdev(netdev);
if (err < 0) {
BT_INFO("register_netdev failed %d", err);
+ spin_lock(&devices_lock);
+ list_del_rcu(&(*dev)->list);
+ spin_unlock(&devices_lock);
free_netdev(netdev);
goto out;
}
@@ -871,16 +884,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
&chan->src, chan->src_type);
set_bit(__LINK_STATE_PRESENT, &netdev->state);
- *dev = netdev_priv(netdev);
- (*dev)->netdev = netdev;
- (*dev)->hdev = chan->conn->hcon->hdev;
- INIT_LIST_HEAD(&(*dev)->peers);
-
- spin_lock(&devices_lock);
- INIT_LIST_HEAD(&(*dev)->list);
- list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
- spin_unlock(&devices_lock);
-
return 0;
out:
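The hunk above moves the list insertion before register_netdev() so the
device is already discoverable when registration callbacks fire, and
unlinks it again on failure. A minimal sketch of that publish-then-
register ordering (illustrative names, not kernel APIs):

#include <stdio.h>

struct dev {
	struct dev *next;
	const char *name;
};

static struct dev *devices;

static int register_device(struct dev *d)
{
	printf("registering %s\n", d->name);
	return 0;	/* or a negative errno on failure */
}

static int setup_device(struct dev *d)
{
	int err;

	/* Publish first: anything that runs during registration can
	 * already find the device on the list. */
	d->next = devices;
	devices = d;

	err = register_device(d);
	if (err < 0) {
		devices = d->next;	/* roll back the insertion */
		return err;
	}
	return 0;
}

int main(void)
{
	struct dev d = { .next = NULL, .name = "bt0" };

	return setup_device(&d) ? 1 : 0;
}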
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index b8c794b87523..95d1a66ba03a 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -53,6 +53,11 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
+config BT_HS
+ bool "Bluetooth High Speed (HS) features"
+ depends on BT_BREDR
+ default y
+
config BT_LE
bool "Bluetooth Low Energy (LE) features"
depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 29c12ae72a66..2b15ae8c1def 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,9 +13,10 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
- a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
+ ecc.o hci_request.o mgmt_util.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
+bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 5a04eb1a7e57..5f123c3320a7 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -16,6 +16,7 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
+#include "hci_request.h"
#include "a2mp.h"
#include "amp.h"
@@ -286,11 +287,21 @@ static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
return 0;
}
+static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ a2mp_send_getinfo_rsp(hdev);
+}
+
static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_info_req *req = (void *) skb->data;
struct hci_dev *hdev;
+ struct hci_request hreq;
+ int err = 0;
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
@@ -311,7 +322,11 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
}
set_bit(READ_LOC_AMP_INFO, &mgr->state);
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+ hci_req_init(&hreq, hdev);
+ hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+ err = hci_req_run(&hreq, read_local_amp_info_complete);
+ if (err < 0)
+ a2mp_send_getinfo_rsp(hdev);
done:
if (hdev)
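The conversion above swaps a fire-and-forget hci_send_cmd() for the
build/run request API with a completion callback, falling back to a
direct response if the request cannot even be started. A compilable
sketch of that pattern with stubbed types (everything here is
illustrative, not the HCI API):

#include <stdio.h>

struct request { int queued; };

static void req_init(struct request *r) { r->queued = 0; }
static void req_add(struct request *r, int opcode) { (void)opcode; r->queued++; }

static int req_run(struct request *r, void (*complete)(int status))
{
	if (!r->queued)
		return -1;	/* nothing queued, report failure */
	complete(0);		/* pretend the controller answered */
	return 0;
}

static void send_rsp(void) { printf("response sent\n"); }

static void on_complete(int status)
{
	printf("command complete, status %d\n", status);
	send_rsp();
}

int main(void)
{
	struct request req;
	int err;

	req_init(&req);
	req_add(&req, 0x1009 /* illustrative opcode */);
	err = req_run(&req, on_complete);
	if (err < 0)
		send_rsp();	/* synchronous fallback, as in the patch */
	return 0;
}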
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
index 296f665adb09..a4ff3ea9b38a 100644
--- a/net/bluetooth/a2mp.h
+++ b/net/bluetooth/a2mp.h
@@ -130,10 +130,29 @@ struct a2mp_physlink_rsp {
#define A2MP_STATUS_SECURITY_VIOLATION 0x06
struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+
+#if IS_ENABLED(CONFIG_BT_HS)
int amp_mgr_put(struct amp_mgr *mgr);
struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
struct sk_buff *skb);
void a2mp_discover_amp(struct l2cap_chan *chan);
+#else
+static inline int amp_mgr_put(struct amp_mgr *mgr)
+{
+ return 0;
+}
+
+static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ return NULL;
+}
+
+static inline void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+}
+#endif
+
void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
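The header now follows the usual compile-out idiom: real prototypes
when the option is on, static inline no-op stubs when it is off, so
callers build either way. A self-contained sketch of the idiom (the
option and function names are stand-ins):

#include <stdio.h>

#define IS_ENABLED(option) (option)
#define CONFIG_FEATURE 0

#if IS_ENABLED(CONFIG_FEATURE)
int feature_put(void);		/* real implementation lives elsewhere */
#else
static inline int feature_put(void)
{
	return 0;		/* harmless default when compiled out */
}
#endif

int main(void)
{
	printf("feature_put() -> %d\n", feature_put());
	return 0;
}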
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index ee016f039100..238ddd3cf95f 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -16,6 +16,7 @@
#include <net/bluetooth/hci_core.h>
#include <crypto/hash.h>
+#include "hci_request.h"
#include "a2mp.h"
#include "amp.h"
@@ -220,10 +221,49 @@ int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
}
+static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
+ struct amp_assoc *assoc = &hdev->loc_assoc;
+ size_t rem_len, frag_len;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ goto send_rsp;
+
+ frag_len = skb->len - sizeof(*rp);
+ rem_len = __le16_to_cpu(rp->rem_len);
+
+ if (rem_len > frag_len) {
+ BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+ memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+ assoc->offset += frag_len;
+
+ /* Read other fragments */
+ amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+ return;
+ }
+
+ memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+ assoc->len = assoc->offset + rem_len;
+ assoc->offset = 0;
+
+send_rsp:
+ /* Send A2MP Rsp when all fragments are received */
+ a2mp_send_getampassoc_rsp(hdev, rp->status);
+ a2mp_send_create_phy_link_req(hdev, rp->status);
+}
+
void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
{
struct hci_cp_read_local_amp_assoc cp;
struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+ struct hci_request req;
+ int err = 0;
BT_DBG("%s handle %d", hdev->name, phy_handle);
@@ -231,12 +271,18 @@ void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
cp.len_so_far = cpu_to_le16(loc_assoc->offset);
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+ if (err < 0)
+ a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
}
void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
{
struct hci_cp_read_local_amp_assoc cp;
+ struct hci_request req;
+ int err = 0;
memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
memset(&cp, 0, sizeof(cp));
@@ -244,7 +290,11 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+ if (err < 0)
+ a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
}
void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
@@ -252,6 +302,8 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
{
struct hci_cp_read_local_amp_assoc cp;
struct amp_mgr *mgr = hcon->amp_mgr;
+ struct hci_request req;
+ int err = 0;
cp.phy_handle = hcon->handle;
cp.len_so_far = cpu_to_le16(0);
@@ -260,7 +312,25 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
/* Read Local AMP Assoc final link information data */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+ if (err < 0)
+ a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+}
+
+static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb)
+{
+ struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
+
+ BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+ hdev->name, rp->status, rp->phy_handle);
+
+ if (rp->status)
+ return;
+
+ amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
/* Write AMP Assoc data fragments, returns true with last fragment written */
@@ -270,6 +340,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
struct hci_cp_write_remote_amp_assoc *cp;
struct amp_mgr *mgr = hcon->amp_mgr;
struct amp_ctrl *ctrl;
+ struct hci_request req;
u16 frag_len, len;
ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
@@ -307,7 +378,9 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
amp_ctrl_put(ctrl);
- hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+ hci_req_run_skb(&req, write_remote_amp_assoc_complete);
kfree(cp);
@@ -344,10 +417,37 @@ void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
amp_write_rem_assoc_frag(hdev, hcon);
}
+static void create_phylink_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct hci_cp_create_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (status) {
+ struct hci_conn *hcon;
+
+ hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+ if (hcon)
+ hci_conn_del(hcon);
+ } else {
+ amp_write_remote_assoc(hdev, cp->phy_handle);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon)
{
struct hci_cp_create_phy_link cp;
+ struct hci_request req;
cp.phy_handle = hcon->handle;
@@ -360,13 +460,33 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
return;
}
- hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+ hci_req_run(&req, create_phylink_complete);
+}
+
+static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct hci_cp_accept_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+ if (!cp)
+ return;
+
+ amp_write_remote_assoc(hdev, cp->phy_handle);
}
void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon)
{
struct hci_cp_accept_phy_link cp;
+ struct hci_request req;
cp.phy_handle = hcon->handle;
@@ -379,7 +499,9 @@ void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
return;
}
- hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+ hci_req_run(&req, accept_phylink_complete);
}
void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
index 7ea3db77ba89..8848f8158ae4 100644
--- a/net/bluetooth/amp.h
+++ b/net/bluetooth/amp.h
@@ -44,6 +44,20 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon);
void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon);
+
+#if IS_ENABLED(CONFIG_BT_HS)
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+#else
+static inline void amp_create_logical_link(struct l2cap_chan *chan)
+{
+}
+
+static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+}
+#endif
+
void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index b0c6c6af76ef..9a50338772f3 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -100,9 +100,9 @@ static void cmtp_application_del(struct cmtp_session *session, struct cmtp_appli
static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value)
{
struct cmtp_application *app;
- struct list_head *p, *n;
+ struct list_head *p;
- list_for_each_safe(p, n, &session->applications) {
+ list_for_each(p, &session->applications) {
app = list_entry(p, struct cmtp_application, list);
switch (pattern) {
case CMTP_MSGNUM:
@@ -511,13 +511,13 @@ static int cmtp_proc_show(struct seq_file *m, void *v)
struct capi_ctr *ctrl = m->private;
struct cmtp_session *session = ctrl->driverdata;
struct cmtp_application *app;
- struct list_head *p, *n;
+ struct list_head *p;
seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
seq_printf(m, "addr %s\n", session->name);
seq_printf(m, "ctrl %d\n", session->num);
- list_for_each_safe(p, n, &session->applications) {
+ list_for_each(p, &session->applications) {
app = list_entry(p, struct cmtp_application, list);
seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2f8fb33067e1..bc43b6490555 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2822,10 +2822,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
{
struct hci_conn_params *params;
- /* The conn params list only contains identity addresses */
- if (!hci_is_identity_address(addr, addr_type))
- return NULL;
-
list_for_each_entry(params, &hdev->le_conn_params, list) {
if (bacmp(&params->addr, addr) == 0 &&
params->addr_type == addr_type) {
@@ -2842,10 +2838,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
{
struct hci_conn_params *param;
- /* The list only contains identity addresses */
- if (!hci_is_identity_address(addr, addr_type))
- return NULL;
-
list_for_each_entry(param, list, action) {
if (bacmp(&param->addr, addr) == 0 &&
param->addr_type == addr_type)
@@ -2861,9 +2853,6 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
{
struct hci_conn_params *params;
- if (!hci_is_identity_address(addr, addr_type))
- return NULL;
-
params = hci_conn_params_lookup(hdev, addr, addr_type);
if (params)
return params;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 32363c2b7f83..218d7dfc342f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -823,7 +823,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
- goto a2mp_rsp;
+ return;
hdev->amp_status = rp->amp_status;
hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -835,46 +835,6 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
-a2mp_rsp:
- a2mp_send_getinfo_rsp(hdev);
-}
-
-static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
- struct amp_assoc *assoc = &hdev->loc_assoc;
- size_t rem_len, frag_len;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- if (rp->status)
- goto a2mp_rsp;
-
- frag_len = skb->len - sizeof(*rp);
- rem_len = __le16_to_cpu(rp->rem_len);
-
- if (rem_len > frag_len) {
- BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
- memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
- assoc->offset += frag_len;
-
- /* Read other fragments */
- amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
- return;
- }
-
- memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
- assoc->len = assoc->offset + rem_len;
- assoc->offset = 0;
-
-a2mp_rsp:
- /* Send A2MP Rsp when all fragments are received */
- a2mp_send_getampassoc_rsp(hdev, rp->status);
- a2mp_send_create_phy_link_req(hdev, rp->status);
}
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
@@ -1409,20 +1369,6 @@ static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
- hdev->name, rp->status, rp->phy_handle);
-
- if (rp->status)
- return;
-
- amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_rssi *rp = (void *) skb->data;
@@ -1944,47 +1890,6 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_unlock(hdev);
}
-static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
-{
- struct hci_cp_create_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- if (status) {
- struct hci_conn *hcon;
-
- hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
- if (hcon)
- hci_conn_del(hcon);
- } else {
- amp_write_remote_assoc(hdev, cp->phy_handle);
- }
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
-{
- struct hci_cp_accept_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
- if (!cp)
- return;
-
- amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_create_conn *cp;
@@ -2998,10 +2903,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_read_clock(hdev, skb);
break;
- case HCI_OP_READ_LOCAL_AMP_ASSOC:
- hci_cc_read_local_amp_assoc(hdev, skb);
- break;
-
case HCI_OP_READ_INQ_RSP_TX_POWER:
hci_cc_read_inq_rsp_tx_power(hdev, skb);
break;
@@ -3106,10 +3007,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_set_adv_param(hdev, skb);
break;
- case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
- hci_cc_write_remote_amp_assoc(hdev, skb);
- break;
-
case HCI_OP_READ_RSSI:
hci_cc_read_rssi(hdev, skb);
break;
@@ -3193,14 +3090,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cs_setup_sync_conn(hdev, ev->status);
break;
- case HCI_OP_CREATE_PHY_LINK:
- hci_cs_create_phylink(hdev, ev->status);
- break;
-
- case HCI_OP_ACCEPT_PHY_LINK:
- hci_cs_accept_phylink(hdev, ev->status);
- break;
-
case HCI_OP_SNIFF_MODE:
hci_cs_sniff_mode(hdev, ev->status);
break;
@@ -4399,6 +4288,23 @@ unlock:
hci_dev_unlock(hdev);
}
+#if IS_ENABLED(CONFIG_BT_HS)
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_channel_selected *ev = (void *)skb->data;
+ struct hci_conn *hcon;
+
+ BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+ skb_pull(skb, sizeof(*ev));
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon)
+ return;
+
+ amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -4522,6 +4428,7 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
+#endif
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
@@ -5206,22 +5113,6 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_ev_channel_selected *ev = (void *) skb->data;
- struct hci_conn *hcon;
-
- BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
-
- skb_pull(skb, sizeof(*ev));
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
u8 event, struct sk_buff *skb)
{
@@ -5442,14 +5333,15 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_le_meta_evt(hdev, skb);
break;
- case HCI_EV_CHANNEL_SELECTED:
- hci_chan_selected_evt(hdev, skb);
- break;
-
case HCI_EV_REMOTE_OOB_DATA_REQUEST:
hci_remote_oob_data_request_evt(hdev, skb);
break;
+#if IS_ENABLED(CONFIG_BT_HS)
+ case HCI_EV_CHANNEL_SELECTED:
+ hci_chan_selected_evt(hdev, skb);
+ break;
+
case HCI_EV_PHY_LINK_COMPLETE:
hci_phy_link_complete_evt(hdev, skb);
break;
@@ -5465,6 +5357,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
hci_disconn_phylink_complete_evt(hdev, skb);
break;
+#endif
case HCI_EV_NUM_COMP_BLOCKS:
hci_num_comp_blocks_evt(hdev, skb);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 244287706f91..586b3d580cfc 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1054,18 +1054,23 @@ static void l2cap_sock_kill(struct sock *sk)
sock_put(sk);
}
-static int __l2cap_wait_ack(struct sock *sk)
+static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan)
{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
DECLARE_WAITQUEUE(wait, current);
int err = 0;
- int timeo = HZ/5;
+ int timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
+ /* Timeout to prevent infinite loop */
+ unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT;
add_wait_queue(sk_sleep(sk), &wait);
set_current_state(TASK_INTERRUPTIBLE);
- while (chan->unacked_frames > 0 && chan->conn) {
+ do {
+ BT_DBG("Waiting for %d ACKs, timeout %04d ms",
+ chan->unacked_frames, time_after(jiffies, timeout) ? 0 :
+ jiffies_to_msecs(timeout - jiffies));
+
if (!timeo)
- timeo = HZ/5;
+ timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
@@ -1080,7 +1085,15 @@ static int __l2cap_wait_ack(struct sock *sk)
err = sock_error(sk);
if (err)
break;
- }
+
+ if (time_after(jiffies, timeout)) {
+ err = -ENOLINK;
+ break;
+ }
+
+ } while (chan->unacked_frames > 0 &&
+ chan->state == BT_CONNECTED);
+
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
@@ -1098,7 +1111,12 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
if (!sk)
return 0;
+ /* prevent sk structure from being freed whilst unlocked */
+ sock_hold(sk);
+
chan = l2cap_pi(sk)->chan;
+ /* prevent chan structure from being freed whilst unlocked */
+ l2cap_chan_hold(chan);
conn = chan->conn;
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1110,8 +1128,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
if (!sk->sk_shutdown) {
- if (chan->mode == L2CAP_MODE_ERTM)
- err = __l2cap_wait_ack(sk);
+ if (chan->mode == L2CAP_MODE_ERTM &&
+ chan->unacked_frames > 0 &&
+ chan->state == BT_CONNECTED)
+ err = __l2cap_wait_ack(sk, chan);
sk->sk_shutdown = SHUTDOWN_MASK;
@@ -1134,6 +1154,11 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
if (conn)
mutex_unlock(&conn->chan_lock);
+ l2cap_chan_put(chan);
+ sock_put(sk);
+
+ BT_DBG("err: %d", err);
+
return err;
}
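The rework above bounds the previously open-ended ACK wait: keep
polling at L2CAP_WAIT_ACK_POLL_PERIOD, but fail with -ENOLINK once an
overall deadline passes. A userspace analog of that bounded poll loop
(wall-clock time instead of jiffies; all values illustrative):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	time_t deadline = time(NULL) + 3;	/* overall cap, 3s here */
	int unacked = 5;
	int err = 0;

	do {
		printf("waiting for %d acks\n", unacked);
		usleep(200 * 1000);		/* poll period */
		unacked--;			/* pretend acks arrive */

		if (time(NULL) > deadline) {
			err = -1;		/* stands in for -ENOLINK */
			break;
		}
	} while (unacked > 0);

	return err ? 1 : 0;
}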
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 92720f3fe573..e435438f95f0 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -6226,6 +6226,17 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
else
auto_conn = HCI_AUTO_CONN_REPORT;
+ /* Kernel internally uses conn_params with resolvable private
+ * address, but Add Device allows only identity addresses.
+ * Make sure it is enforced before calling
+ * hci_conn_params_lookup.
+ */
+ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+ err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
+ goto unlock;
+ }
+
/* If the connection parameters don't exist for this device,
* they will be created and configured with defaults.
*/
@@ -6340,6 +6351,18 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
else
addr_type = ADDR_LE_DEV_RANDOM;
+ /* Kernel internally uses conn_params with resolvable private
+ * address, but Remove Device allows only identity addresses.
+ * Make sure it is enforced before calling
+ * hci_conn_params_lookup.
+ */
+ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
+ goto unlock;
+ }
+
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
addr_type);
if (!params) {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4ff77a16956c..0aa8f5cf46a1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -339,6 +339,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_bridge_getlink = br_getlink,
.ndo_bridge_setlink = br_setlink,
.ndo_bridge_dellink = br_dellink,
+ .ndo_features_check = passthru_features_check,
};
static void br_dev_free(struct net_device *dev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a538cb1199a3..45e4757c6fd2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -281,6 +281,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
br_fdb_delete_by_port(br, NULL, 0, 1);
br_vlan_flush(br);
+ br_multicast_dev_del(br);
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index c94321955db7..d747275fad18 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -85,6 +85,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
memset(&e, 0, sizeof(e));
e.ifindex = port->dev->ifindex;
e.state = p->state;
+ e.vid = p->addr.vid;
if (p->addr.proto == htons(ETH_P_IP))
e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
@@ -230,7 +231,7 @@ errout:
}
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
- struct br_ip *group, int type)
+ struct br_ip *group, int type, u8 state)
{
struct br_mdb_entry entry;
@@ -241,9 +242,78 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
#if IS_ENABLED(CONFIG_IPV6)
entry.addr.u.ip6 = group->u.ip6;
#endif
+ entry.state = state;
+ entry.vid = group->vid;
__br_mdb_notify(dev, &entry, type);
}
+static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+ struct net_device *dev,
+ int ifindex, u32 pid,
+ u32 seq, int type, unsigned int flags)
+{
+ struct br_port_msg *bpm;
+ struct nlmsghdr *nlh;
+ struct nlattr *nest;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ bpm = nlmsg_data(nlh);
+ memset(bpm, 0, sizeof(*bpm));
+ bpm->family = AF_BRIDGE;
+ bpm->ifindex = dev->ifindex;
+ nest = nla_nest_start(skb, MDBA_ROUTER);
+ if (!nest)
+ goto cancel;
+
+ if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
+ goto end;
+
+ nla_nest_end(skb, nest);
+ nlmsg_end(skb, nlh);
+ return 0;
+
+end:
+ nla_nest_end(skb, nest);
+cancel:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static inline size_t rtnl_rtr_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct br_port_msg))
+ + nla_total_size(sizeof(__u32));
+}
+
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+ int type)
+{
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+ int ifindex;
+
+ ifindex = port ? port->dev->ifindex : 0;
+ skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
+ if (!skb)
+ goto errout;
+
+ err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+ return;
+
+errout:
+ rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
if (entry->ifindex == 0)
@@ -263,6 +333,8 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
return false;
if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
return false;
+ if (entry->vid >= VLAN_VID_MASK)
+ return false;
return true;
}
@@ -374,6 +446,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
return -EINVAL;
memset(&ip, 0, sizeof(ip));
+ ip.vid = entry->vid;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
@@ -391,8 +464,11 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
+ unsigned short vid = VLAN_N_VID;
+ struct net_device *dev, *pdev;
struct br_mdb_entry *entry;
- struct net_device *dev;
+ struct net_bridge_port *p;
+ struct net_port_vlans *pv;
struct net_bridge *br;
int err;
@@ -402,9 +478,32 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
br = netdev_priv(dev);
- err = __br_mdb_add(net, br, entry);
- if (!err)
- __br_mdb_notify(dev, entry, RTM_NEWMDB);
+ /* If vlan filtering is enabled and no VLAN is specified, install
+ * the mdb entry on all vlans configured on the port.
+ */
+ pdev = __dev_get_by_index(net, entry->ifindex);
+ if (!pdev)
+ return -ENODEV;
+
+ p = br_port_get_rtnl(pdev);
+ if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ return -EINVAL;
+
+ pv = nbp_get_vlan_info(p);
+ if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ entry->vid = vid;
+ err = __br_mdb_add(net, br, entry);
+ if (err)
+ break;
+ __br_mdb_notify(dev, entry, RTM_NEWMDB);
+ }
+ } else {
+ err = __br_mdb_add(net, br, entry);
+ if (!err)
+ __br_mdb_notify(dev, entry, RTM_NEWMDB);
+ }
+
return err;
}
@@ -421,6 +520,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
return -EINVAL;
memset(&ip, 0, sizeof(ip));
+ ip.vid = entry->vid;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
@@ -465,8 +565,12 @@ unlock:
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
- struct net_device *dev;
+ struct net *net = sock_net(skb->sk);
+ unsigned short vid = VLAN_N_VID;
+ struct net_device *dev, *pdev;
struct br_mdb_entry *entry;
+ struct net_bridge_port *p;
+ struct net_port_vlans *pv;
struct net_bridge *br;
int err;
@@ -476,9 +580,31 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
br = netdev_priv(dev);
- err = __br_mdb_del(br, entry);
- if (!err)
- __br_mdb_notify(dev, entry, RTM_DELMDB);
+ /* If vlan filtering is enabled and no VLAN is specified, delete
+ * the mdb entry on all vlans configured on the port.
+ */
+ pdev = __dev_get_by_index(net, entry->ifindex);
+ if (!pdev)
+ return -ENODEV;
+
+ p = br_port_get_rtnl(pdev);
+ if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ return -EINVAL;
+
+ pv = nbp_get_vlan_info(p);
+ if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ entry->vid = vid;
+ err = __br_mdb_del(br, entry);
+ if (!err)
+ __br_mdb_notify(dev, entry, RTM_DELMDB);
+ }
+ } else {
+ err = __br_mdb_del(br, entry);
+ if (!err)
+ __br_mdb_notify(dev, entry, RTM_DELMDB);
+ }
+
return err;
}
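Both add and del above fan an unspecified VLAN (vid 0) out across every
vlan configured on the port. A minimal sketch of that bitmap fan-out
(sizes and names are illustrative):

#include <stdio.h>

#define N_VID 16

static unsigned int vlan_bitmap = (1u << 1) | (1u << 10);

static void apply_entry(int vid)
{
	printf("apply mdb entry, vid %d\n", vid);
}

int main(void)
{
	int vid, entry_vid = 0;		/* 0 means "all configured vlans" */

	if (entry_vid == 0 && vlan_bitmap) {
		for (vid = 0; vid < N_VID; vid++)
			if (vlan_bitmap & (1u << vid))
				apply_entry(vid);
	} else {
		apply_entry(entry_vid);
	}
	return 0;
}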
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0b39dcc65b94..0752796fe0ba 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,6 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
+ br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+ p->state);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist &&
@@ -704,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
if (unlikely(!p))
goto err;
rcu_assign_pointer(*pp, p);
- br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+ br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
found:
mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -764,6 +766,7 @@ static void br_multicast_router_expired(unsigned long data)
goto out;
hlist_del_init_rcu(&port->rlist);
+ br_rtr_notify(br->dev, port, RTM_DELMDB);
out:
spin_unlock(&br->multicast_lock);
@@ -924,6 +927,15 @@ void br_multicast_add_port(struct net_bridge_port *port)
void br_multicast_del_port(struct net_bridge_port *port)
{
+ struct net_bridge *br = port->br;
+ struct net_bridge_port_group *pg;
+ struct hlist_node *n;
+
+ /* Take care of the remaining groups; only permanent ones should be left */
+ spin_lock_bh(&br->multicast_lock);
+ hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+ br_multicast_del_pg(br, pg);
+ spin_unlock_bh(&br->multicast_lock);
del_timer_sync(&port->multicast_router_timer);
}
@@ -963,10 +975,13 @@ void br_multicast_disable_port(struct net_bridge_port *port)
spin_lock(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
- br_multicast_del_pg(br, pg);
+ if (pg->state == MDB_TEMPORARY)
+ br_multicast_del_pg(br, pg);
- if (!hlist_unhashed(&port->rlist))
+ if (!hlist_unhashed(&port->rlist)) {
hlist_del_init_rcu(&port->rlist);
+ br_rtr_notify(br->dev, port, RTM_DELMDB);
+ }
del_timer(&port->multicast_router_timer);
del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
@@ -1204,6 +1219,7 @@ static void br_multicast_add_router(struct net_bridge *br,
hlist_add_behind_rcu(&port->rlist, slot);
else
hlist_add_head_rcu(&port->rlist, &br->router_list);
+ br_rtr_notify(br->dev, port, RTM_NEWMDB);
}
static void br_multicast_mark_router(struct net_bridge *br,
@@ -1437,7 +1453,8 @@ br_multicast_leave_group(struct net_bridge *br,
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
- br_mdb_notify(br->dev, port, group, RTM_DELMDB);
+ br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+ p->state);
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
@@ -1754,12 +1771,6 @@ void br_multicast_open(struct net_bridge *br)
void br_multicast_stop(struct net_bridge *br)
{
- struct net_bridge_mdb_htable *mdb;
- struct net_bridge_mdb_entry *mp;
- struct hlist_node *n;
- u32 ver;
- int i;
-
del_timer_sync(&br->multicast_router_timer);
del_timer_sync(&br->ip4_other_query.timer);
del_timer_sync(&br->ip4_own_query.timer);
@@ -1767,6 +1778,15 @@ void br_multicast_stop(struct net_bridge *br)
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
+}
+
+void br_multicast_dev_del(struct net_bridge *br)
+{
+ struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_mdb_entry *mp;
+ struct hlist_node *n;
+ u32 ver;
+ int i;
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
@@ -1834,8 +1854,10 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
p->multicast_router = val;
err = 0;
- if (val < 2 && !hlist_unhashed(&p->rlist))
+ if (val < 2 && !hlist_unhashed(&p->rlist)) {
hlist_del_init_rcu(&p->rlist);
+ br_rtr_notify(br->dev, p, RTM_DELMDB);
+ }
if (val == 1)
break;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c8b9bcfe997e..0a6f095bb0c9 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -49,9 +49,9 @@ static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 0;
-static int brnf_filter_pppoe_tagged __read_mostly = 0;
-static int brnf_pass_vlan_indev __read_mostly = 0;
+static int brnf_filter_vlan_tagged __read_mostly;
+static int brnf_filter_pppoe_tagged __read_mostly;
+static int brnf_pass_vlan_indev __read_mostly;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
@@ -284,7 +284,7 @@ int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
nf_bridge->neigh_header,
ETH_HLEN-ETH_ALEN);
/* tell br_dev_xmit to continue with forwarding */
- nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+ nf_bridge->bridged_dnat = 1;
/* FIXME Need to refragment */
ret = neigh->output(neigh, skb);
}
@@ -356,7 +356,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
- nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+ nf_bridge->in_prerouting = 0;
if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -444,7 +444,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb)
nf_bridge->pkt_otherhost = true;
}
- nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
+ nf_bridge->in_prerouting = 1;
nf_bridge->physindev = skb->dev;
skb->dev = brnf_get_logical_dev(skb, skb->dev);
@@ -850,10 +850,8 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (skb->nf_bridge &&
- !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
+ if (skb->nf_bridge && !skb->nf_bridge->in_prerouting)
return NF_STOP;
- }
return NF_ACCEPT;
}
@@ -872,7 +870,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb_pull(skb, ETH_HLEN);
- nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+ nf_bridge->bridged_dnat = 0;
BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
@@ -887,7 +885,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
static int br_nf_dev_xmit(struct sk_buff *skb)
{
- if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+ if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
br_nf_pre_routing_finish_bridge_slow(skb);
return 1;
}
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 13b7d1e3d185..77383bfe7ea3 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -174,7 +174,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
- nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+ nf_bridge->in_prerouting = 0;
if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
skb_dst_drop(skb);
v6ops->route_input(skb);
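Across both files the shared nf_bridge->mask flag word becomes named
one-bit fields, trading mask arithmetic for plain assignments and
tests. A small sketch of the resulting shape (names illustrative):

#include <stdio.h>

struct bridge_info {
	unsigned int in_prerouting:1;
	unsigned int bridged_dnat:1;
};

int main(void)
{
	struct bridge_info info = { 0 };

	info.in_prerouting = 1;	/* was: mask |= BRNF_NF_BRIDGE_PREROUTING */
	info.in_prerouting = 0;	/* was: mask &= ~BRNF_NF_BRIDGE_PREROUTING */
	if (!info.in_prerouting)
		printf("not in prerouting\n");
	return 0;
}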
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4d74a0639c4c..0f2408f6cdfe 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -166,8 +166,6 @@ static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
sizeof(vinfo), &vinfo))
goto nla_put_failure;
- vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
-
vinfo.vid = vid_end;
vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
@@ -730,6 +728,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
+ [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -777,6 +776,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
br_stp_set_bridge_priority(br, priority);
}
+ if (data[IFLA_BR_VLAN_FILTERING]) {
+ u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
+
+ err = __br_vlan_filter_toggle(br, vlan_filter);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -788,6 +795,7 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */
nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */
0;
}
@@ -800,13 +808,15 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
u32 stp_enabled = br->stp_enabled;
u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
+ u8 vlan_enabled = br_vlan_enabled(br);
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
- nla_put_u16(skb, IFLA_BR_PRIORITY, priority))
+ nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
+ nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
return -EMSGSIZE;
return 0;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8b21146b24a0..3d95647039d0 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -466,6 +466,7 @@ void br_multicast_disable_port(struct net_bridge_port *port);
void br_multicast_init(struct net_bridge *br);
void br_multicast_open(struct net_bridge *br);
void br_multicast_stop(struct net_bridge *br);
+void br_multicast_dev_del(struct net_bridge *br);
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb);
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
@@ -488,7 +489,9 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
void br_mdb_init(void);
void br_mdb_uninit(void);
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
- struct br_ip *group, int type);
+ struct br_ip *group, int type, u8 state);
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+ int type);
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -565,6 +568,10 @@ static inline void br_multicast_stop(struct net_bridge *br)
{
}
+static inline void br_multicast_dev_del(struct net_bridge *br)
+{
+}
+
static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb)
{
@@ -607,6 +614,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
bool br_vlan_find(struct net_bridge *br, u16 vid);
void br_recalculate_fwd_mask(struct net_bridge *br);
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
int br_vlan_init(struct net_bridge *br);
@@ -764,6 +772,12 @@ static inline int br_vlan_enabled(struct net_bridge *br)
{
return 0;
}
+
+static inline int __br_vlan_filter_toggle(struct net_bridge *br,
+ unsigned long val)
+{
+ return -EOPNOTSUPP;
+}
#endif
struct nf_br_ops {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 0d41f81838ff..3cef6892c0bb 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -468,21 +468,27 @@ void br_recalculate_fwd_mask(struct net_bridge *br)
~(1u << br->group_addr[5]);
}
-int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
- if (!rtnl_trylock())
- return restart_syscall();
-
if (br->vlan_enabled == val)
- goto unlock;
+ return 0;
br->vlan_enabled = val;
br_manage_promisc(br);
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
-unlock:
+ return 0;
+}
+
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+{
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ __br_vlan_filter_toggle(br, val);
rtnl_unlock();
+
return 0;
}
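The split above follows the common locked-wrapper/unlocked-core shape:
__br_vlan_filter_toggle() assumes the caller holds rtnl, while
br_vlan_filter_toggle() only manages the lock. A compilable sketch of
the shape with a pthread mutex standing in for rtnl (illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int enabled;

static int __toggle(int val)		/* caller must hold the lock */
{
	if (enabled == val)
		return 0;
	enabled = val;
	printf("state recalculated for val=%d\n", val);
	return 0;
}

static int toggle(int val)		/* public, lock-managing wrapper */
{
	pthread_mutex_lock(&lock);
	__toggle(val);
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	toggle(1);
	/* Single-threaded here, so calling the unlocked core directly
	 * is safe; real callers must already hold the lock. */
	__toggle(0);
	return 0;
}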
diff --git a/net/core/Makefile b/net/core/Makefile
index fec0856dd6c0..086b01fbe1bd 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
+obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
diff --git a/net/core/dev.c b/net/core/dev.c
index a8e4dd430285..4870c3556a5a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3061,6 +3061,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
else
skb_dst_force(skb);
+#ifdef CONFIG_NET_SWITCHDEV
+ /* Don't forward if offload device already forwarded */
+ if (skb->offload_fwd_mark &&
+ skb->offload_fwd_mark == dev->offload_fwd_mark) {
+ consume_skb(skb);
+ rc = NET_XMIT_SUCCESS;
+ goto out;
+ }
+#endif
+
txq = netdev_pick_tx(dev, skb, accel_priv);
q = rcu_dereference_bh(txq->qdisc);
@@ -3645,7 +3655,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
qdisc_skb_cb(skb)->pkt_len = skb->len;
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- qdisc_bstats_update_cpu(cl->q, skb);
+ qdisc_bstats_cpu_update(cl->q, skb);
switch (tc_classify(skb, cl, &cl_res)) {
case TC_ACT_OK:
@@ -3653,7 +3663,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
skb->tc_index = TC_H_MIN(cl_res.classid);
break;
case TC_ACT_SHOT:
- qdisc_qstats_drop_cpu(cl->q);
+ qdisc_qstats_cpu_drop(cl->q);
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
kfree_skb(skb);
@@ -4985,7 +4995,7 @@ EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
* Gets the next netdev_adjacent->private from the dev's lower neighbour
* list, starting from iter position. The caller must hold either hold the
* RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
*/
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter)
@@ -5040,7 +5050,7 @@ EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
* Gets the next netdev_adjacent from the dev's lower neighbour
* list, starting from iter position. The caller must hold RTNL lock or
* its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
*/
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
@@ -6075,6 +6085,26 @@ int dev_get_phys_port_name(struct net_device *dev,
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
+ * dev_change_proto_down - update protocol port state information
+ * @dev: device
+ * @proto_down: new value
+ *
+ * This info can be used by switch drivers to set the phys state of the
+ * port.
+ */
+int dev_change_proto_down(struct net_device *dev, bool proto_down)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_change_proto_down)
+ return -EOPNOTSUPP;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return ops->ndo_change_proto_down(dev, proto_down);
+}
+EXPORT_SYMBOL(dev_change_proto_down);
+
+/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
@@ -7639,7 +7669,7 @@ static int __init net_dev_init(void)
open_softirq(NET_RX_SOFTIRQ, net_rx_action);
hotcpu_notifier(dev_cpu_callback, 0);
- dst_init();
+ dst_subsys_init();
rc = 0;
out:
return rc;
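dev_change_proto_down() above is a thin validate-then-delegate helper
around a new ndo hook. A standalone sketch of that delegation shape
(struct layouts and the driver hook are illustrative):

#include <errno.h>
#include <stdio.h>

struct net_device_ops {
	int (*change_proto_down)(int down);
};

struct net_device {
	const struct net_device_ops *ops;
	int present;
};

static int dev_change_proto_down(struct net_device *dev, int down)
{
	if (!dev->ops->change_proto_down)
		return -EOPNOTSUPP;	/* driver has no hook */
	if (!dev->present)
		return -ENODEV;
	return dev->ops->change_proto_down(down);
}

static int drv_hook(int down)
{
	printf("phys state -> %s\n", down ? "down" : "up");
	return 0;
}

static const struct net_device_ops ops = { .change_proto_down = drv_hook };

int main(void)
{
	struct net_device dev = { .ops = &ops, .present = 1 };

	return dev_change_proto_down(&dev, 1);
}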
diff --git a/net/core/dst.c b/net/core/dst.c
index 002144bea935..f8694d1b8702 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -22,6 +22,7 @@
#include <linux/prefetch.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
/*
* Theory of operations:
@@ -158,19 +159,10 @@ const u32 dst_default_metrics[RTAX_MAX + 1] = {
[RTAX_MAX] = 0xdeadbeef,
};
-
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
- int initial_ref, int initial_obsolete, unsigned short flags)
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+ struct net_device *dev, int initial_ref, int initial_obsolete,
+ unsigned short flags)
{
- struct dst_entry *dst;
-
- if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
- if (ops->gc(ops))
- return NULL;
- }
- dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
- if (!dst)
- return NULL;
dst->child = NULL;
dst->dev = dev;
if (dev)
@@ -200,6 +192,25 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst->next = NULL;
if (!(flags & DST_NOCOUNT))
dst_entries_add(ops, 1);
+}
+EXPORT_SYMBOL(dst_init);
+
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+ int initial_ref, int initial_obsolete, unsigned short flags)
+{
+ struct dst_entry *dst;
+
+ if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
+ if (ops->gc(ops))
+ return NULL;
+ }
+
+ dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+ if (!dst)
+ return NULL;
+
+ dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
+
return dst;
}
EXPORT_SYMBOL(dst_alloc);
@@ -248,7 +259,11 @@ again:
dst->ops->destroy(dst);
if (dst->dev)
dev_put(dst->dev);
- kmem_cache_free(dst->ops->kmem_cachep, dst);
+
+ if (dst->flags & DST_METADATA)
+ kfree(dst);
+ else
+ kmem_cache_free(dst->ops->kmem_cachep, dst);
dst = child;
if (dst) {
@@ -329,6 +344,70 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+static struct dst_ops md_dst_ops = {
+ .family = AF_UNSPEC,
+};
+
+static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+{
+ WARN_ONCE(1, "Attempting to call output on metadata dst\n");
+ kfree_skb(skb);
+ return 0;
+}
+
+static int dst_md_discard(struct sk_buff *skb)
+{
+ WARN_ONCE(1, "Attempting to call input on metadata dst\n");
+ kfree_skb(skb);
+ return 0;
+}
+
+static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
+{
+ struct dst_entry *dst;
+
+ dst = &md_dst->dst;
+ dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+ DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
+
+ dst->input = dst_md_discard;
+ dst->output = dst_md_discard_sk;
+
+ memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
+ md_dst->opts_len = optslen;
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
+{
+ struct metadata_dst *md_dst;
+
+ md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
+ if (!md_dst)
+ return NULL;
+
+ __metadata_dst_init(md_dst, optslen);
+
+ return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc);
+
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
+{
+ int cpu;
+ struct metadata_dst __percpu *md_dst;
+
+ md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
+ __alignof__(struct metadata_dst), flags);
+ if (!md_dst)
+ return NULL;
+
+ for_each_possible_cpu(cpu)
+ __metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);
+
+ return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
+
/* Dirty hack. We did it in 2.2 (in __dst_free),
* we have _very_ good reasons not to repeat
* this mistake in 2.3, but we have no choice
@@ -393,7 +472,7 @@ static struct notifier_block dst_dev_notifier = {
.priority = -10, /* must be called after other network notifiers */
};
-void __init dst_init(void)
+void __init dst_subsys_init(void)
{
register_netdevice_notifier(&dst_dev_notifier);
}
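Metadata dsts above are plain kmalloc'd objects with optslen option
bytes appended, initialized by the newly split-out dst_init() and
kfree()d rather than returned to the dst kmem cache. A userspace
sketch of the trailing-options allocation (layout illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct md_dst {
	int refcnt;
	unsigned char opts_len;
	unsigned char opts[];	/* opts_len bytes follow the struct */
};

static struct md_dst *md_dst_alloc(unsigned char optslen)
{
	struct md_dst *md = malloc(sizeof(*md) + optslen);

	if (!md)
		return NULL;
	md->refcnt = 1;
	md->opts_len = optslen;
	memset(md->opts, 0, optslen);	/* options start zeroed */
	return md;
}

int main(void)
{
	struct md_dst *md = md_dst_alloc(8);

	if (!md)
		return 1;
	printf("opts_len %u\n", md->opts_len);
	free(md);	/* freed directly, not via a cache */
	return 0;
}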
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 9a12668f7d62..ae8306e7c56f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -16,6 +16,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
+#include <net/ip_tunnels.h>
int fib_default_rule_add(struct fib_rules_ops *ops,
u32 pref, u32 table, u32 flags)
@@ -186,6 +187,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
goto out;
+ if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
+ goto out;
+
ret = ops->match(rule, fl, flags);
out:
return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -330,6 +334,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
if (tb[FRA_FWMASK])
rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
+ if (tb[FRA_TUN_ID])
+ rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+
rule->action = frh->action;
rule->flags = frh->flags;
rule->table = frh_get_table(frh, tb);
@@ -407,6 +414,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
if (unresolved)
ops->unresolved_rules++;
+ if (rule->tun_id)
+ ip_tunnel_need_metadata();
+
notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
flush_route_cache(ops);
rules_ops_put(ops);
@@ -473,6 +483,10 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
(rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
continue;
+ if (tb[FRA_TUN_ID] &&
+ (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
+ continue;
+
if (!ops->compare(rule, frh, tb))
continue;
@@ -487,6 +501,9 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
goto errout;
}
+ if (rule->tun_id)
+ ip_tunnel_unneed_metadata();
+
list_del_rcu(&rule->list);
if (rule->action == FR_ACT_GOTO) {
@@ -535,7 +552,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
+ nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
+ nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
+ nla_total_size(4) /* FRA_FWMARK */
- + nla_total_size(4); /* FRA_FWMASK */
+ + nla_total_size(4) /* FRA_FWMASK */
+ + nla_total_size(8); /* FRA_TUN_ID */
if (ops->nlmsg_payload)
payload += ops->nlmsg_payload(rule);
@@ -591,7 +609,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
((rule->mark_mask || rule->mark) &&
nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
(rule->target &&
- nla_put_u32(skb, FRA_GOTO, rule->target)))
+ nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+ (rule->tun_id &&
+ nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
goto nla_put_failure;
if (rule->suppress_ifgroup != -1) {
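The FRA_TUN_ID support above makes a rule with a tunnel id match only
flows carrying that id, while rules without one behave as before. A
minimal sketch of the match semantics (types illustrative):

#include <stdint.h>
#include <stdio.h>

struct rule { uint64_t tun_id; };	/* 0 == no tunnel-id constraint */
struct flow { uint64_t tun_id; };

static int rule_match(const struct rule *r, const struct flow *fl)
{
	if (r->tun_id && r->tun_id != fl->tun_id)
		return 0;
	return 1;
}

int main(void)
{
	struct rule r = { .tun_id = 42 };
	struct flow a = { .tun_id = 42 }, b = { .tun_id = 7 };

	printf("a matches: %d, b matches: %d\n",
	       rule_match(&r, &a), rule_match(&r, &b));
	return 0;
}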
diff --git a/net/core/filter.c b/net/core/filter.c
index be3098fb65e4..a50dbfa83ad9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -47,6 +47,8 @@
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
+#include <net/dst_metadata.h>
/**
* sk_filter - run a packet through a socket filter
@@ -1424,6 +1426,136 @@ const struct bpf_func_proto bpf_clone_redirect_proto = {
.arg3_type = ARG_ANYTHING,
};
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+ .func = bpf_get_cgroup_classid,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ __be16 vlan_proto = (__force __be16) r2;
+
+ if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
+ vlan_proto != htons(ETH_P_8021AD)))
+ vlan_proto = htons(ETH_P_8021Q);
+
+ return skb_vlan_push(skb, vlan_proto, vlan_tci);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+ .func = bpf_skb_vlan_push,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
+
+static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+
+ return skb_vlan_pop(skb);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+ .func = bpf_skb_vlan_pop,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+
+bool bpf_helper_changes_skb_data(void *func)
+{
+ if (func == bpf_skb_vlan_push)
+ return true;
+ if (func == bpf_skb_vlan_pop)
+ return true;
+ return false;
+}
+
+static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
+ struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
+
+ if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
+ return -EINVAL;
+
+ to->tunnel_id = be64_to_cpu(info->key.tun_id);
+ to->remote_ipv4 = be32_to_cpu(info->key.ipv4_src);
+
+ return 0;
+}
+
+const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+ .func = bpf_skb_get_tunnel_key,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_STACK,
+ .arg3_type = ARG_CONST_STACK_SIZE,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static struct metadata_dst __percpu *md_dst;
+
+static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
+ struct metadata_dst *md = this_cpu_ptr(md_dst);
+ struct ip_tunnel_info *info;
+
+ if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
+ return -EINVAL;
+
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *) md);
+ skb_dst_set(skb, (struct dst_entry *) md);
+
+ info = &md->u.tun_info;
+ info->mode = IP_TUNNEL_INFO_TX;
+ info->key.tun_id = cpu_to_be64(from->tunnel_id);
+ info->key.ipv4_dst = cpu_to_be32(from->remote_ipv4);
+
+ return 0;
+}
+
+const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+ .func = bpf_skb_set_tunnel_key,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_STACK,
+ .arg3_type = ARG_CONST_STACK_SIZE,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+{
+ if (!md_dst) {
+ /* A race is not possible, since this is called from the
+ * verifier, which holds the verifier mutex.
+ */
+ md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+ if (!md_dst)
+ return NULL;
+ }
+ return &bpf_skb_set_tunnel_key_proto;
+}
+
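All of these helpers follow the same shape: the unused-argument u64 calling convention plus a bpf_func_proto that describes the return and argument types to the verifier. A minimal sketch in the same pattern (bpf_skb_get_len is hypothetical, not part of this patch):

static u64 bpf_skb_get_len(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* r1 is the verifier-checked ctx pointer (the skb) */
	return ((struct sk_buff *) (unsigned long) r1)->len;
}

static const struct bpf_func_proto bpf_skb_get_len_proto = {
	.func		= bpf_skb_get_len,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};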
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
@@ -1461,6 +1593,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_l4_csum_replace_proto;
case BPF_FUNC_clone_redirect:
return &bpf_clone_redirect_proto;
+ case BPF_FUNC_get_cgroup_classid:
+ return &bpf_get_cgroup_classid_proto;
+ case BPF_FUNC_skb_vlan_push:
+ return &bpf_skb_vlan_push_proto;
+ case BPF_FUNC_skb_vlan_pop:
+ return &bpf_skb_vlan_pop_proto;
+ case BPF_FUNC_skb_get_tunnel_key:
+ return &bpf_skb_get_tunnel_key_proto;
+ case BPF_FUNC_skb_set_tunnel_key:
+ return bpf_get_skb_set_tunnel_key_proto();
default:
return sk_filter_func_proto(func_id);
}
@@ -1569,6 +1711,13 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
offsetof(struct net_device, ifindex));
break;
+ case offsetof(struct __sk_buff, hash):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, hash));
+ break;
+
case offsetof(struct __sk_buff, mark):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2a834c6179b9..11e6540fa386 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -590,6 +590,15 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
}
EXPORT_SYMBOL(make_flow_keys_digest);
+static inline void __skb_set_sw_hash(struct sk_buff *skb, u32 hash,
+ struct flow_keys *keys)
+{
+ if (keys->ports.ports)
+ skb->l4_hash = 1;
+ skb->sw_hash = 1;
+ skb->hash = hash;
+}
+
/**
* __skb_get_hash: calculate a flow hash
* @skb: sk_buff to calculate flow hash from
@@ -609,10 +618,8 @@ void __skb_get_hash(struct sk_buff *skb)
hash = ___skb_get_hash(skb, &keys, hashrnd);
if (!hash)
return;
- if (keys.ports.ports)
- skb->l4_hash = 1;
- skb->sw_hash = 1;
- skb->hash = hash;
+
+ __skb_set_sw_hash(skb, hash, &keys);
}
EXPORT_SYMBOL(__skb_get_hash);
@@ -624,6 +631,49 @@ __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
}
EXPORT_SYMBOL(skb_get_hash_perturb);
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
+{
+ struct flow_keys keys;
+
+ memset(&keys, 0, sizeof(keys));
+
+ memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
+ sizeof(keys.addrs.v6addrs.src));
+ memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
+ sizeof(keys.addrs.v6addrs.dst));
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys.ports.src = fl6->fl6_sport;
+ keys.ports.dst = fl6->fl6_dport;
+ keys.keyid.keyid = fl6->fl6_gre_key;
+ keys.tags.flow_label = (__force u32)fl6->flowlabel;
+ keys.basic.ip_proto = fl6->flowi6_proto;
+
+ __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
+
+ return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi6);
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
+{
+ struct flow_keys keys;
+
+ memset(&keys, 0, sizeof(keys));
+
+ keys.addrs.v4addrs.src = fl4->saddr;
+ keys.addrs.v4addrs.dst = fl4->daddr;
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ keys.ports.src = fl4->fl4_sport;
+ keys.ports.dst = fl4->fl4_dport;
+ keys.keyid.keyid = fl4->fl4_gre_key;
+ keys.basic.ip_proto = fl4->flowi4_proto;
+
+ __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
+
+ return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi4);
+
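The two helpers above let a caller that already holds a flow key set the skb hash without re-dissecting the packet. A minimal usage sketch, assuming a transmit path with a populated flowi4 (example_flow_hash is a hypothetical caller):

static u32 example_flow_hash(struct sk_buff *skb, struct flowi4 *fl4)
{
	/* reuse an existing valid hash, else derive one from the flow */
	if (skb->l4_hash || skb->sw_hash)
		return skb->hash;
	return __skb_get_hash_flowi4(skb, fl4);
}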
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen)
{
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
new file mode 100644
index 000000000000..5d6d8e3d450a
--- /dev/null
+++ b/net/core/lwtunnel.c
@@ -0,0 +1,243 @@
+/*
+ * lwtunnel Infrastructure for lightweight tunnels like MPLS
+ *
+ * Authors: Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/lwtunnel.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <net/lwtunnel.h>
+#include <net/rtnetlink.h>
+#include <net/ip6_fib.h>
+
+struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
+{
+ struct lwtunnel_state *lws;
+
+ lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);
+
+ return lws;
+}
+EXPORT_SYMBOL(lwtunnel_state_alloc);
+
+static const struct lwtunnel_encap_ops __rcu *
+ lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+ unsigned int num)
+{
+ if (num > LWTUNNEL_ENCAP_MAX)
+ return -ERANGE;
+
+ return !cmpxchg((const struct lwtunnel_encap_ops **)
+ &lwtun_encaps[num],
+ NULL, ops) ? 0 : -1;
+}
+EXPORT_SYMBOL(lwtunnel_encap_add_ops);
+
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
+ unsigned int encap_type)
+{
+ int ret;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE ||
+ encap_type > LWTUNNEL_ENCAP_MAX)
+ return -ERANGE;
+
+ ret = (cmpxchg((const struct lwtunnel_encap_ops **)
+ &lwtun_encaps[encap_type],
+ ops, NULL) == ops) ? 0 : -1;
+
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_encap_del_ops);
+
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap, struct lwtunnel_state **lws)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE ||
+ encap_type > LWTUNNEL_ENCAP_MAX)
+ return ret;
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
+ if (likely(ops && ops->build_state))
+ ret = ops->build_state(dev, encap, lws);
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+{
+ const struct lwtunnel_encap_ops *ops;
+ struct nlattr *nest;
+ int ret = -EINVAL;
+
+ if (!lwtstate)
+ return 0;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ ret = -EOPNOTSUPP;
+ nest = nla_nest_start(skb, RTA_ENCAP);
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->fill_encap))
+ ret = ops->fill_encap(skb, lwtstate);
+ rcu_read_unlock();
+
+ if (ret)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+ ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
+ if (ret)
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+
+ return (ret == -EOPNOTSUPP ? 0 : ret);
+}
+EXPORT_SYMBOL(lwtunnel_fill_encap);
+
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = 0;
+
+ if (!lwtstate)
+ return 0;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->get_encap_size))
+ ret = nla_total_size(ops->get_encap_size(lwtstate));
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_get_encap_size);
+
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = 0;
+
+ if (!a && !b)
+ return 0;
+
+ if (!a || !b)
+ return 1;
+
+ if (a->type != b->type)
+ return 1;
+
+ if (a->type == LWTUNNEL_ENCAP_NONE ||
+ a->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[a->type]);
+ if (likely(ops && ops->cmp_encap))
+ ret = ops->cmp_encap(a, b);
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_cmp_encap);
+
+int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (!lwtstate)
+ goto drop;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->output))
+ ret = ops->output(sk, skb);
+ rcu_read_unlock();
+
+ if (ret == -EOPNOTSUPP)
+ goto drop;
+
+ return ret;
+
+drop:
+ kfree_skb(skb);
+
+ return ret;
+}
+
+int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+{
+ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct lwtunnel_state *lwtstate = NULL;
+
+ if (rt) {
+ lwtstate = rt->rt6i_lwtstate;
+ skb->dev = rt->dst.dev;
+ }
+
+ skb->protocol = htons(ETH_P_IPV6);
+
+ return __lwtunnel_output(sk, skb, lwtstate);
+}
+EXPORT_SYMBOL(lwtunnel_output6);
+
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct rtable *rt = (struct rtable *)skb_dst(skb);
+ struct lwtunnel_state *lwtstate = NULL;
+
+ if (rt) {
+ lwtstate = rt->rt_lwtstate;
+ skb->dev = rt->dst.dev;
+ }
+
+ skb->protocol = htons(ETH_P_IP);
+
+ return __lwtunnel_output(sk, skb, lwtstate);
+}
+EXPORT_SYMBOL(lwtunnel_output);
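Taken together this gives encap providers a small registration surface. A hedged sketch of a provider plugging in (the example_* names are hypothetical; a real provider such as MPLS supplies its own encap type and a fuller set of ops):

static int example_build_state(struct net_device *dev, struct nlattr *encap,
			       struct lwtunnel_state **ts)
{
	struct lwtunnel_state *s = lwtunnel_state_alloc(0);

	if (!s)
		return -ENOMEM;
	s->type = LWTUNNEL_ENCAP_MPLS;	/* provider-specific type */
	*ts = s;
	return 0;
}

static const struct lwtunnel_encap_ops example_encap_ops = {
	.build_state = example_build_state,
};

static int __init example_encap_init(void)
{
	return lwtunnel_encap_add_ops(&example_encap_ops, LWTUNNEL_ENCAP_MPLS);
}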
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 84195dacb8b6..2b515ba7e94f 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -274,8 +274,12 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
if (!neigh_forced_gc(tbl) &&
- entries >= tbl->gc_thresh3)
+ entries >= tbl->gc_thresh3) {
+ net_info_ratelimited("%s: neighbor table overflow!\n",
+ tbl->id);
+ NEIGH_CACHE_STAT_INC(tbl, table_fulls);
goto out_entries;
+ }
}
n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
@@ -1849,6 +1853,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
ndst.ndts_forced_gc_runs += st->forced_gc_runs;
+ ndst.ndts_table_fulls += st->table_fulls;
}
if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
@@ -2717,12 +2722,12 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
struct neigh_statistics *st = v;
if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
+ seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
return 0;
}
seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
- "%08lx %08lx %08lx %08lx %08lx\n",
+ "%08lx %08lx %08lx %08lx %08lx %08lx\n",
atomic_read(&tbl->entries),
st->allocs,
@@ -2739,7 +2744,8 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
st->periodic_gc_runs,
st->forced_gc_runs,
- st->unres_discards
+ st->unres_discards,
+ st->table_fulls
);
return 0;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 18b34d771ed4..39ec6949c1e6 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -404,6 +404,19 @@ static ssize_t group_store(struct device *dev, struct device_attribute *attr,
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
+static int change_proto_down(struct net_device *dev, unsigned long proto_down)
+{
+ return dev_change_proto_down(dev, (bool) proto_down);
+}
+
+static ssize_t proto_down_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_proto_down);
+}
+NETDEVICE_SHOW_RW(proto_down, fmt_dec);
+
static ssize_t phys_port_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -501,6 +514,7 @@ static struct attribute *net_class_attrs[] = {
&dev_attr_phys_port_id.attr,
&dev_attr_phys_port_name.attr,
&dev_attr_phys_switch_id.attr,
+ &dev_attr_proto_down.attr,
NULL,
};
ATTRIBUTE_GROUPS(net_class);
@@ -712,14 +726,17 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
old_map = rcu_dereference_protected(queue->rps_map,
lockdep_is_held(&rps_map_lock));
rcu_assign_pointer(queue->rps_map, map);
- spin_unlock(&rps_map_lock);
if (map)
static_key_slow_inc(&rps_needed);
- if (old_map) {
- kfree_rcu(old_map, rcu);
+ if (old_map)
static_key_slow_dec(&rps_needed);
- }
+
+ spin_unlock(&rps_map_lock);
+
+ if (old_map)
+ kfree_rcu(old_map, rcu);
+
free_cpumask_var(mask);
return len;
}
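The reordering above follows the usual RCU replacement discipline: publish the new map and adjust the static key while holding the lock, then defer the actual free until the lock is dropped. A generic sketch of the pattern (struct map and all names here are illustrative only):

struct map {
	struct rcu_head rcu;
	/* payload */
};

static struct map __rcu *cur_map;
static DEFINE_SPINLOCK(map_lock);

static void replace_map(struct map *new)
{
	struct map *old;

	spin_lock(&map_lock);
	old = rcu_dereference_protected(cur_map,
					lockdep_is_held(&map_lock));
	rcu_assign_pointer(cur_map, new);
	spin_unlock(&map_lock);

	if (old)
		kfree_rcu(old, rcu);	/* never free under the lock */
}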
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1cbd209192ea..de8d5cc5eb24 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -273,7 +273,6 @@ struct pktgen_dev {
/* runtime counters relating to clone_skb */
- __u64 allocated_skbs;
__u32 clone_count;
int last_ok; /* Was last skb sent?
* Or a failed transmit of some sort?
@@ -2279,7 +2278,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
{
- pkt_dev->pkt_overhead = 0;
+ pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2788,6 +2787,7 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
} else {
skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
}
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
return skb;
}
@@ -3397,7 +3397,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
return;
}
pkt_dev->last_pkt_size = pkt_dev->skb->len;
- pkt_dev->allocated_skbs++;
pkt_dev->clone_count = 0; /* reset counter */
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dc004b1e1f85..788ceed39463 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -896,7 +896,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
- + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
+ + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ + nla_total_size(1); /* IFLA_PROTO_DOWN */
+
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1082,7 +1084,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
(dev->ifalias &&
nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
- atomic_read(&dev->carrier_changes)))
+ atomic_read(&dev->carrier_changes)) ||
+ nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
goto nla_put_failure;
if (1) {
@@ -1319,6 +1322,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_LINK_NETNSID] = { .type = NLA_S32 },
+ [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1861,6 +1865,14 @@ static int do_setlink(const struct sk_buff *skb,
}
err = 0;
+ if (tb[IFLA_PROTO_DOWN]) {
+ err = dev_change_proto_down(dev,
+ nla_get_u8(tb[IFLA_PROTO_DOWN]));
+ if (err)
+ goto errout;
+ status |= DO_SETLINK_NOTIFY;
+ }
+
errout:
if (status & DO_SETLINK_MODIFIED) {
if (status & DO_SETLINK_NOTIFY)
@@ -1951,16 +1963,30 @@ static int rtnl_group_dellink(const struct net *net, int group)
return 0;
}
+int rtnl_delete_link(struct net_device *dev)
+{
+ const struct rtnl_link_ops *ops;
+ LIST_HEAD(list_kill);
+
+ ops = dev->rtnl_link_ops;
+ if (!ops || !ops->dellink)
+ return -EOPNOTSUPP;
+
+ ops->dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtnl_delete_link);
+
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
- const struct rtnl_link_ops *ops;
struct net_device *dev;
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
int err;
- LIST_HEAD(list_kill);
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
@@ -1982,13 +2008,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!dev)
return -ENODEV;
- ops = dev->rtnl_link_ops;
- if (!ops || !ops->dellink)
- return -EOPNOTSUPP;
-
- ops->dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
- return 0;
+ return rtnl_delete_link(dev);
}
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
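rtnl_delete_link() is exported (GPL) so other code paths can tear down an rtnl_link-created device without open-coding the dellink/unregister pair. Usage sketch, assuming the caller already holds RTNL (example_remove_dev is hypothetical):

static int example_remove_dev(struct net_device *dev)
{
	ASSERT_RTNL();
	return rtnl_delete_link(dev);	/* -EOPNOTSUPP without a dellink op */
}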
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 43d3dd62fcc8..42689d5c468c 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -60,11 +60,15 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
struct phy_device *phydev;
unsigned int type;
+ if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->drv)
+ return false;
+
if (skb_headroom(skb) < ETH_HLEN)
return false;
+
__skb_push(skb, ETH_HLEN);
- type = classify(skb);
+ type = ptp_classify_raw(skb);
__skb_pull(skb, ETH_HLEN);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index b445d492c115..78d4ac97aae3 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -574,7 +574,7 @@ static int dsa_of_probe(struct device *dev)
{
struct device_node *np = dev->of_node;
struct device_node *child, *mdio, *ethernet, *port, *link;
- struct mii_bus *mdio_bus;
+ struct mii_bus *mdio_bus, *mdio_bus_switch;
struct net_device *ethernet_dev;
struct dsa_platform_data *pd;
struct dsa_chip_data *cd;
@@ -636,6 +636,16 @@ static int dsa_of_probe(struct device *dev)
if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
cd->eeprom_len = eeprom_len;
+ mdio = of_parse_phandle(child, "mii-bus", 0);
+ if (mdio) {
+ mdio_bus_switch = of_mdio_find_bus(mdio);
+ if (!mdio_bus_switch) {
+ ret = -EPROBE_DEFER;
+ goto out_free_chip;
+ }
+ cd->host_dev = &mdio_bus_switch->dev;
+ }
+
for_each_available_child_of_node(child, port) {
port_reg = of_get_property(port, "reg", NULL);
if (!port_reg)
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index d5f1f9b862ea..311796c809af 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -13,9 +13,10 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
struct dsa_device_ops {
- netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
int (*rcv)(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
};
@@ -26,7 +27,7 @@ struct dsa_slave_priv {
* switch port.
*/
struct net_device *dev;
- netdev_tx_t (*xmit)(struct sk_buff *skb,
+ struct sk_buff * (*xmit)(struct sk_buff *skb,
struct net_device *dev);
/*
@@ -47,6 +48,9 @@ struct dsa_slave_priv {
int old_duplex;
struct net_device *bridge_dev;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
};
/* dsa.c */
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 35c47ddd04f0..aa0266f7d0ce 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -18,6 +18,7 @@
#include <net/rtnetlink.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
+#include <linux/netpoll.h>
#include "dsa_priv.h"
/* slave mii_bus handling ***************************************************/
@@ -199,103 +200,66 @@ out:
return 0;
}
-static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 nlm_flags)
+static int dsa_slave_port_fdb_add(struct net_device *dev,
+ struct switchdev_obj *obj)
{
+ struct switchdev_obj_fdb *fdb = &obj->u.fdb;
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
int ret = -EOPNOTSUPP;
- if (ds->drv->fdb_add)
- ret = ds->drv->fdb_add(ds, p->port, addr, vid);
+ if (obj->trans == SWITCHDEV_TRANS_PREPARE)
+ ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP;
+ else if (obj->trans == SWITCHDEV_TRANS_COMMIT)
+ ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid);
return ret;
}
-static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int dsa_slave_port_fdb_del(struct net_device *dev,
+ struct switchdev_obj *obj)
{
+ struct switchdev_obj_fdb *fdb = &obj->u.fdb;
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
int ret = -EOPNOTSUPP;
- if (ds->drv->fdb_del)
- ret = ds->drv->fdb_del(ds, p->port, addr, vid);
+ if (ds->drv->port_fdb_del)
+ ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid);
return ret;
}
-static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb,
- const unsigned char *addr, u16 vid,
- bool is_static,
- u32 portid, u32 seq, int type,
- unsigned int flags)
-{
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
-
- nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
- if (!nlh)
- return -EMSGSIZE;
-
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_EXT_LEARNED;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = dev->ifindex;
- ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
-
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
-
- if (vid && nla_put_u16(skb, NDA_VLAN, vid))
- goto nla_put_failure;
-
- nlmsg_end(skb, nlh);
- return 0;
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
-}
-
-/* Dump information about entries, in response to GETNEIGH */
-static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev, int idx)
+static int dsa_slave_port_fdb_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
unsigned char addr[ETH_ALEN] = { 0 };
+ u16 vid = 0;
int ret;
- if (!ds->drv->fdb_getnext)
+ if (!ds->drv->port_fdb_getnext)
return -EOPNOTSUPP;
- for (; ; idx++) {
+ for (;;) {
bool is_static;
- ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static);
+ ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
+ &is_static);
if (ret < 0)
break;
- if (idx < cb->args[0])
- continue;
+ obj->u.fdb.addr = addr;
+ obj->u.fdb.vid = vid;
+ obj->u.fdb.ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
- ret = dsa_slave_fill_info(dev, skb, addr, 0,
- is_static,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH, NLM_F_MULTI);
+ ret = obj->cb(dev, obj);
if (ret < 0)
break;
}
- return idx;
+ return ret == -ENOENT ? 0 : ret;
}
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -363,6 +327,62 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
return ret;
}
+static int dsa_slave_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ int err;
+
+ /* For the prepare phase, ensure the full set of changes is feasible in
+ * one go in order to signal a failure properly. If an operation is not
+ * supported, return -EOPNOTSUPP.
+ */
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = dsa_slave_port_fdb_add(dev, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dsa_slave_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = dsa_slave_port_fdb_del(dev, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dsa_slave_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = dsa_slave_port_fdb_dump(dev, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
static int dsa_slave_bridge_port_join(struct net_device *dev,
struct net_device *br)
{
@@ -418,24 +438,53 @@ static int dsa_slave_port_attr_get(struct net_device *dev,
return 0;
}
-static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline netdev_tx_t dsa_netpoll_send_skb(struct dsa_slave_priv *p,
+ struct sk_buff *skb)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
-
- return p->xmit(skb, dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (p->netpoll)
+ netpoll_send_skb(p->netpoll, skb);
+#else
+ BUG();
+#endif
+ return NETDEV_TX_OK;
}
-static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
+ struct sk_buff *nskb;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Transmit function may have to reallocate the original SKB */
+ nskb = p->xmit(skb, dev);
+ if (!nskb)
+ return NETDEV_TX_OK;
+
+ /* An SKB sent via netpoll still needs to be mangled with the
+ * protocol-specific tag to be transmitted successfully
+ */
+ if (unlikely(netpoll_tx_running(dev)))
+ return dsa_netpoll_send_skb(p, nskb);
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
+ /* Queue the SKB for transmission on the parent interface, but
+ * do not modify its EtherType
+ */
+ nskb->dev = p->parent->dst->master_netdev;
+ dev_queue_xmit(nskb);
return NETDEV_TX_OK;
}
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ /* Just return the original SKB */
+ return skb;
+}
+
/* ethtool operations *******************************************************/
static int
@@ -665,6 +714,49 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
return ret;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int dsa_slave_netpoll_setup(struct net_device *dev,
+ struct netpoll_info *ni)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ struct net_device *master = ds->dst->master_netdev;
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ if (!netpoll)
+ return -ENOMEM;
+
+ err = __netpoll_setup(netpoll, master);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ p->netpoll = netpoll;
+out:
+ return err;
+}
+
+static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct netpoll *netpoll = p->netpoll;
+
+ if (!netpoll)
+ return;
+
+ p->netpoll = NULL;
+
+ __netpoll_free_async(netpoll);
+}
+
+static void dsa_slave_poll_controller(struct net_device *dev)
+{
+}
+#endif
+
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_settings = dsa_slave_get_settings,
.set_settings = dsa_slave_set_settings,
@@ -692,16 +784,24 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
- .ndo_fdb_add = dsa_slave_fdb_add,
- .ndo_fdb_del = dsa_slave_fdb_del,
- .ndo_fdb_dump = dsa_slave_fdb_dump,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_do_ioctl = dsa_slave_ioctl,
.ndo_get_iflink = dsa_slave_get_iflink,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = dsa_slave_netpoll_setup,
+ .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
+ .ndo_poll_controller = dsa_slave_poll_controller,
+#endif
};
static const struct switchdev_ops dsa_slave_switchdev_ops = {
.switchdev_port_attr_get = dsa_slave_port_attr_get,
.switchdev_port_attr_set = dsa_slave_port_attr_set,
+ .switchdev_port_obj_add = dsa_slave_port_obj_add,
+ .switchdev_port_obj_del = dsa_slave_port_obj_del,
+ .switchdev_port_obj_dump = dsa_slave_port_obj_dump,
};
static void dsa_slave_adjust_link(struct net_device *dev)
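With dsa_slave_xmit() now centralizing accounting and queuing, a tag driver only transforms the skb. A sketch of the new contract (all example_* names hypothetical): return the possibly-reallocated skb for the core to queue, or NULL if the skb was consumed.

static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	if (skb_cow_head(skb, 4) < 0) {
		kfree_skb(skb);
		return NULL;	/* consumed; core must not touch it */
	}

	/* insert the 4-byte tag into the headroom here */

	return skb;	/* core sets skb->dev and calls dev_queue_xmit() */
}

static const struct dsa_device_ops example_netdev_ops = {
	.xmit	= example_tag_xmit,
};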
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 83d3572cdb20..e2aadb73111d 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -58,14 +58,11 @@
#define BRCM_EG_TC_MASK 0x7
#define BRCM_EG_PID_MASK 0x1f
-static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *brcm_tag;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
goto out_free;
@@ -87,17 +84,11 @@ static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
- /* Queue the SKB for transmission on the parent interface, but
- * do not modify its EtherType
- */
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
-
- return NETDEV_TX_OK;
+ return skb;
out_free:
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 2dab27063273..aa780e4ac0bd 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -15,14 +15,11 @@
#define DSA_HLEN 4
-static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *dsa_header;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
/*
* Convert the outermost 802.1q tag to a DSA tag for tagged
* packets, or insert a DSA tag between the addresses and
@@ -63,14 +60,11 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
dsa_header[3] = 0x00;
}
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
-
- return NETDEV_TX_OK;
+ return skb;
out_free:
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 9aeda596f7ec..2288c8098c42 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -16,14 +16,11 @@
#define DSA_HLEN 4
#define EDSA_HLEN 8
-static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *edsa_header;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
/*
* Convert the outermost 802.1q tag to a DSA tag and prepend
* a DSA ethertype field if the packet is tagged, or insert
@@ -76,14 +73,11 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
edsa_header[7] = 0x00;
}
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
-
- return NETDEV_TX_OK;
+ return skb;
out_free:
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index e268f9db8893..d25efc93d8f1 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -13,16 +13,13 @@
#include <linux/slab.h>
#include "dsa_priv.h"
-static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct sk_buff *nskb;
int padlen;
u8 *trailer;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
/*
* We have to make sure that the trailer ends up as the very
* last 4 bytes of the packet. This means that we have to pad
@@ -36,7 +33,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
if (nskb == NULL) {
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
skb_reserve(nskb, NET_IP_ALIGN);
@@ -57,10 +54,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
trailer[2] = 0x10;
trailer[3] = 0x00;
- nskb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(nskb);
-
- return NETDEV_TX_OK;
+ return nskb;
}
static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 77e0f0e7a88e..217127c3a3ef 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -114,7 +114,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL(eth_header);
/**
- * eth_get_headlen - determine the the length of header for an ethernet frame
+ * eth_get_headlen - determine the length of header for an ethernet frame
* @data: pointer to start of frame
* @len: total length of frame
*
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index b2155a123f6c..8d5960a37195 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -24,6 +24,26 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
}
static inline int
+rdev_suspend(struct cfg802154_registered_device *rdev)
+{
+ int ret;
+ trace_802154_rdev_suspend(&rdev->wpan_phy);
+ ret = rdev->ops->suspend(&rdev->wpan_phy);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
+rdev_resume(struct cfg802154_registered_device *rdev)
+{
+ int ret;
+ trace_802154_rdev_resume(&rdev->wpan_phy);
+ ret = rdev->ops->resume(&rdev->wpan_phy);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
unsigned char name_assign_type,
enum nl802154_iftype type, __le64 extended_addr)
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index 133b4280660c..bd88525b041e 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -14,11 +14,13 @@
*/
#include <linux/device.h>
+#include <linux/rtnetlink.h>
#include <net/cfg802154.h>
#include "core.h"
#include "sysfs.h"
+#include "rdev-ops.h"
static inline struct cfg802154_registered_device *
dev_to_rdev(struct device *dev)
@@ -62,10 +64,46 @@ static struct attribute *pmib_attrs[] = {
};
ATTRIBUTE_GROUPS(pmib);
+#ifdef CONFIG_PM_SLEEP
+static int wpan_phy_suspend(struct device *dev)
+{
+ struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+ int ret = 0;
+
+ if (rdev->ops->suspend) {
+ rtnl_lock();
+ ret = rdev_suspend(rdev);
+ rtnl_unlock();
+ }
+
+ return ret;
+}
+
+static int wpan_phy_resume(struct device *dev)
+{
+ struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+ int ret = 0;
+
+ if (rdev->ops->resume) {
+ rtnl_lock();
+ ret = rdev_resume(rdev);
+ rtnl_unlock();
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
+#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
+#else
+#define WPAN_PHY_PM_OPS NULL
+#endif
+
struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
.dev_groups = pmib_groups,
+ .pm = WPAN_PHY_PM_OPS,
};
int wpan_phy_sysfs_init(void)
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 9b5f0eb36696..4399b7fbaa31 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -40,6 +40,28 @@
* rdev->ops traces *
*************************************************************/
+DECLARE_EVENT_CLASS(wpan_phy_only_evt,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT, WPAN_PHY_PR_ARG)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_suspend,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_resume,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy)
+);
+
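Any further wpan_phy-only op can reuse this event class instead of spelling out a new TRACE_EVENT. Sketch with a hypothetical event name:

DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_example,
	TP_PROTO(struct wpan_phy *wpan_phy),
	TP_ARGS(wpan_phy)
);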
TRACE_EVENT(802154_rdev_add_virtual_intf,
TP_PROTO(struct wpan_phy *wpan_phy, char *name,
enum nl802154_iftype type, __le64 extended_addr),
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9532ee87151f..cc4e498a0ccf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -112,6 +112,7 @@
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
+#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
@@ -1780,6 +1781,8 @@ static int __init inet_init(void)
dev_add_pack(&ip_packet_type);
+ ip_tunnel_core_init();
+
rc = 0;
out:
return rc;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 6c8b1fbafce8..34a308573f4b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -291,6 +291,40 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
}
+/* Create and send an arp packet. */
+static void arp_send_dst(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw,
+ const unsigned char *target_hw, struct sk_buff *oskb)
+{
+ struct sk_buff *skb;
+
+ /* No arp on this interface. */
+ if (dev->flags & IFF_NOARP)
+ return;
+
+ skb = arp_create(type, ptype, dest_ip, dev, src_ip,
+ dest_hw, src_hw, target_hw);
+ if (!skb)
+ return;
+
+ if (oskb)
+ skb_dst_copy(skb, oskb);
+
+ arp_xmit(skb);
+}
+
+void arp_send(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw, const unsigned char *src_hw,
+ const unsigned char *target_hw)
+{
+ arp_send_dst(type, ptype, dest_ip, dev, src_ip, dest_hw, src_hw,
+ target_hw, NULL);
+}
+EXPORT_SYMBOL(arp_send);
+
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
__be32 saddr = 0;
@@ -346,8 +380,9 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
}
}
- arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
- dst_hw, dev->dev_addr, NULL);
+ arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
+ dst_hw, dev->dev_addr, NULL,
+ dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
}
static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -597,32 +632,6 @@ void arp_xmit(struct sk_buff *skb)
EXPORT_SYMBOL(arp_xmit);
/*
- * Create and send an arp packet.
- */
-void arp_send(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw, const unsigned char *src_hw,
- const unsigned char *target_hw)
-{
- struct sk_buff *skb;
-
- /*
- * No arp on this interface.
- */
-
- if (dev->flags&IFF_NOARP)
- return;
-
- skb = arp_create(type, ptype, dest_ip, dev, src_ip,
- dest_hw, src_hw, target_hw);
- if (!skb)
- return;
-
- arp_xmit(skb);
-}
-EXPORT_SYMBOL(arp_send);
-
-/*
* Process an arp request.
*/
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 574fad9cca05..f915abff1350 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -74,7 +74,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
inet->inet_daddr = fl4->daddr;
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
- inet_set_txhash(sk);
+ sk_set_txhash(sk);
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6bbc54940eb4..6b98de0d7949 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -280,6 +280,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_scope = scope;
fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+ fl4.flowi4_tun_key.tun_id = 0;
if (!fib_lookup(net, &fl4, &res, 0))
return FIB_RES_PREFSRC(net, res);
} else {
@@ -313,6 +314,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.saddr = dst;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+ fl4.flowi4_tun_key.tun_id = 0;
no_addr = idev->ifa_list == NULL;
@@ -591,6 +593,8 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_FLOW] = { .type = NLA_U32 },
+ [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
+ [RTA_ENCAP] = { .type = NLA_NESTED },
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@ -656,6 +660,12 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
case RTA_TABLE:
cfg->fc_table = nla_get_u32(attr);
break;
+ case RTA_ENCAP:
+ cfg->fc_encap = attr;
+ break;
+ case RTA_ENCAP_TYPE:
+ cfg->fc_encap_type = nla_get_u16(attr);
+ break;
}
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3a06586b170c..558e196bae0f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -42,6 +42,7 @@
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
+#include <net/lwtunnel.h>
#include "fib_lookup.h"
@@ -208,6 +209,7 @@ static void free_fib_info_rcu(struct rcu_head *head)
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
dev_put(nexthop_nh->nh_dev);
+ lwtstate_put(nexthop_nh->nh_lwtstate);
free_nh_exceptions(nexthop_nh);
rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
@@ -266,6 +268,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
+ lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
return -1;
onh++;
@@ -366,6 +369,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
if (fi->fib_nhs) {
+ size_t nh_encapsize = 0;
/* Also handles the special case fib_nhs == 1 */
/* each nexthop is packed in an attribute */
@@ -374,8 +378,21 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
/* may contain flow and gateway attribute */
nhsize += 2 * nla_total_size(4);
+ /* grab encap info */
+ for_nexthops(fi) {
+ if (nh->nh_lwtstate) {
+ /* RTA_ENCAP_TYPE */
+ nh_encapsize += lwtunnel_get_encap_size(
+ nh->nh_lwtstate);
+ /* RTA_ENCAP */
+ nh_encapsize += nla_total_size(2);
+ }
+ } endfor_nexthops(fi);
+
/* all nexthops are packed in a nested attribute */
- payload += nla_total_size(fi->fib_nhs * nhsize);
+ payload += nla_total_size((fi->fib_nhs * nhsize) +
+ nh_encapsize);
+
}
return payload;
@@ -421,13 +438,15 @@ static int fib_detect_death(struct fib_info *fi, int order,
if (n) {
state = n->nud_state;
neigh_release(n);
+ } else {
+ return 0;
}
if (state == NUD_REACHABLE)
return 0;
if ((state & NUD_VALID) && order != dflt)
return 0;
if ((state & NUD_VALID) ||
- (*last_idx < 0 && order > dflt)) {
+ (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
*last_resort = fi;
*last_idx = order;
}
@@ -452,6 +471,9 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg)
{
+ struct net *net = cfg->fc_nlinfo.nl_net;
+ int ret;
+
change_nexthops(fi) {
int attrlen;
@@ -475,18 +497,66 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
if (nexthop_nh->nh_tclassid)
fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
+ nla = nla_find(attrs, attrlen, RTA_ENCAP);
+ if (nla) {
+ struct lwtunnel_state *lwtstate;
+ struct net_device *dev = NULL;
+ struct nlattr *nla_entype;
+
+ nla_entype = nla_find(attrs, attrlen,
+ RTA_ENCAP_TYPE);
+ if (!nla_entype)
+ goto err_inval;
+ if (cfg->fc_oif)
+ dev = __dev_get_by_index(net, cfg->fc_oif);
+ ret = lwtunnel_build_state(dev, nla_get_u16(
+ nla_entype),
+ nla, &lwtstate);
+ if (ret)
+ goto errout;
+ nexthop_nh->nh_lwtstate =
+ lwtstate_get(lwtstate);
+ }
}
rtnh = rtnh_next(rtnh, &remaining);
} endfor_nexthops(fi);
return 0;
+
+err_inval:
+ ret = -EINVAL;
+
+errout:
+ return ret;
}
#endif
+int fib_encap_match(struct net *net, u16 encap_type,
+ struct nlattr *encap,
+ int oif, const struct fib_nh *nh)
+{
+ struct lwtunnel_state *lwtstate;
+ struct net_device *dev = NULL;
+ int ret;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (oif)
+ dev = __dev_get_by_index(net, oif);
+ ret = lwtunnel_build_state(dev, encap_type,
+ encap, &lwtstate);
+ if (!ret)
+ return lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+
+ return 0;
+}
+
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
+ struct net *net = cfg->fc_nlinfo.nl_net;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct rtnexthop *rtnh;
int remaining;
@@ -496,6 +566,12 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
return 1;
if (cfg->fc_oif || cfg->fc_gw) {
+ if (cfg->fc_encap) {
+ if (fib_encap_match(net, cfg->fc_encap_type,
+ cfg->fc_encap, cfg->fc_oif,
+ fi->fib_nh))
+ return 1;
+ }
if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
(!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
return 0;
@@ -882,6 +958,21 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
} else {
struct fib_nh *nh = fi->fib_nh;
+ if (cfg->fc_encap) {
+ struct lwtunnel_state *lwtstate;
+ struct net_device *dev = NULL;
+
+ if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+ goto err_inval;
+ if (cfg->fc_oif)
+ dev = __dev_get_by_index(net, cfg->fc_oif);
+ err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+ cfg->fc_encap, &lwtstate);
+ if (err)
+ goto failure;
+
+ nh->nh_lwtstate = lwtstate_get(lwtstate);
+ }
nh->nh_oif = cfg->fc_oif;
nh->nh_gw = cfg->fc_gw;
nh->nh_flags = cfg->fc_flags;
@@ -1055,6 +1146,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
goto nla_put_failure;
#endif
+ if (fi->fib_nh->nh_lwtstate)
+ lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (fi->fib_nhs > 1) {
@@ -1090,6 +1183,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
goto nla_put_failure;
#endif
+ if (nh->nh_lwtstate)
+ lwtunnel_fill_encap(skb, nh->nh_lwtstate);
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
} endfor_nexthops(fi);
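The reference discipline throughout this file: lwtunnel_build_state() hands back a state the caller owns, which is stored with lwtstate_get() and released with lwtstate_put() when the nexthop is freed. A condensed sketch of the attach step, mirroring fib_create_info() above (example_attach_encap is hypothetical):

static int example_attach_encap(struct fib_nh *nh, struct net_device *dev,
				u16 encap_type, struct nlattr *encap)
{
	struct lwtunnel_state *lws;
	int err;

	err = lwtunnel_build_state(dev, encap_type, encap, &lws);
	if (err)
		return err;

	nh->nh_lwtstate = lwtstate_get(lws);	/* hold a reference */
	return 0;
}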
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4a7b5b2a1ce3..d9c552a721fc 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -31,7 +31,6 @@
#include <net/xfrm.h>
static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
-static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX];
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
@@ -61,197 +60,6 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
- int hdr_len)
-{
- struct gre_base_hdr *greh;
-
- skb_push(skb, hdr_len);
-
- skb_reset_transport_header(skb);
- greh = (struct gre_base_hdr *)skb->data;
- greh->flags = tnl_flags_to_gre_flags(tpi->flags);
- greh->protocol = tpi->proto;
-
- if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
- __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
- if (tpi->flags&TUNNEL_SEQ) {
- *ptr = tpi->seq;
- ptr--;
- }
- if (tpi->flags&TUNNEL_KEY) {
- *ptr = tpi->key;
- ptr--;
- }
- if (tpi->flags&TUNNEL_CSUM &&
- !(skb_shinfo(skb)->gso_type &
- (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
- *ptr = 0;
- *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
- skb->len, 0));
- }
- }
-}
-EXPORT_SYMBOL_GPL(gre_build_header);
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- bool *csum_err)
-{
- const struct gre_base_hdr *greh;
- __be32 *options;
- int hdr_len;
-
- if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
- return -EINVAL;
-
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
- if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
- return -EINVAL;
-
- tpi->flags = gre_flags_to_tnl_flags(greh->flags);
- hdr_len = ip_gre_calc_hlen(tpi->flags);
-
- if (!pskb_may_pull(skb, hdr_len))
- return -EINVAL;
-
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
- tpi->proto = greh->protocol;
-
- options = (__be32 *)(greh + 1);
- if (greh->flags & GRE_CSUM) {
- if (skb_checksum_simple_validate(skb)) {
- *csum_err = true;
- return -EINVAL;
- }
-
- skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
- null_compute_pseudo);
-
- options++;
- }
-
- if (greh->flags & GRE_KEY) {
- tpi->key = *options;
- options++;
- } else
- tpi->key = 0;
-
- if (unlikely(greh->flags & GRE_SEQ)) {
- tpi->seq = *options;
- options++;
- } else
- tpi->seq = 0;
-
- /* WCCP version 1 and 2 protocol decoding.
- * - Change protocol to IP
- * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
- */
- if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
- tpi->proto = htons(ETH_P_IP);
- if ((*(u8 *)options & 0xF0) != 0x40) {
- hdr_len += 4;
- if (!pskb_may_pull(skb, hdr_len))
- return -EINVAL;
- }
- }
-
- return iptunnel_pull_header(skb, hdr_len, tpi->proto);
-}
-
-static int gre_cisco_rcv(struct sk_buff *skb)
-{
- struct tnl_ptk_info tpi;
- int i;
- bool csum_err = false;
-
-#ifdef CONFIG_NET_IPGRE_BROADCAST
- if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
- /* Looped back packet, drop it! */
- if (rt_is_output_route(skb_rtable(skb)))
- goto drop;
- }
-#endif
-
- if (parse_gre_header(skb, &tpi, &csum_err) < 0)
- goto drop;
-
- rcu_read_lock();
- for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
- struct gre_cisco_protocol *proto;
- int ret;
-
- proto = rcu_dereference(gre_cisco_proto_list[i]);
- if (!proto)
- continue;
- ret = proto->handler(skb, &tpi);
- if (ret == PACKET_RCVD) {
- rcu_read_unlock();
- return 0;
- }
- }
- rcu_read_unlock();
-
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-drop:
- kfree_skb(skb);
- return 0;
-}
-
-static void gre_cisco_err(struct sk_buff *skb, u32 info)
-{
- /* All the routers (except for Linux) return only
- * 8 bytes of packet payload. It means, that precise relaying of
- * ICMP in the real Internet is absolutely infeasible.
- *
- * Moreover, Cisco "wise men" put GRE key to the third word
- * in GRE header. It makes impossible maintaining even soft
- * state for keyed
- * GRE tunnels with enabled checksum. Tell them "thank you".
- *
- * Well, I wonder, rfc1812 was written by Cisco employee,
- * what the hell these idiots break standards established
- * by themselves???
- */
-
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- struct tnl_ptk_info tpi;
- bool csum_err = false;
- int i;
-
- if (parse_gre_header(skb, &tpi, &csum_err)) {
- if (!csum_err) /* ignore csum errors. */
- return;
- }
-
- if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- skb->dev->ifindex, 0, IPPROTO_GRE, 0);
- return;
- }
- if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
- IPPROTO_GRE, 0);
- return;
- }
-
- rcu_read_lock();
- for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
- struct gre_cisco_protocol *proto;
-
- proto = rcu_dereference(gre_cisco_proto_list[i]);
- if (!proto)
- continue;
-
- if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD)
- goto out;
-
- }
-out:
- rcu_read_unlock();
-}
-
static int gre_rcv(struct sk_buff *skb)
{
const struct gre_protocol *proto;
@@ -302,60 +110,19 @@ static const struct net_protocol net_gre_protocol = {
.netns_ok = 1,
};
-static const struct gre_protocol ipgre_protocol = {
- .handler = gre_cisco_rcv,
- .err_handler = gre_cisco_err,
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *newp)
-{
- struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
- &gre_cisco_proto_list[newp->priority];
-
- return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_register);
-
-int gre_cisco_unregister(struct gre_cisco_protocol *del_proto)
-{
- struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
- &gre_cisco_proto_list[del_proto->priority];
- int ret;
-
- ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL;
-
- if (ret)
- return ret;
-
- synchronize_net();
- return 0;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_unregister);
-
static int __init gre_init(void)
{
pr_info("GRE over IPv4 demultiplexor driver\n");
if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
pr_err("can't add protocol\n");
- goto err;
- }
-
- if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
- pr_info("%s: can't add ipgre handler\n", __func__);
- goto err_gre;
+ return -EAGAIN;
}
-
return 0;
-err_gre:
- inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
-err:
- return -EAGAIN;
}
static void __exit gre_exit(void)
{
- gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f5203fba6236..c0556f1e4bf0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -496,6 +496,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
+ skb_dst_set(skb_in, NULL);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
RT_TOS(tos), rt2->dst.dev);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0cb9165421d4..89120196a949 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -343,7 +343,6 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
struct sock *sk2;
const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw = NULL;
- int twrefcnt = 0;
spin_lock(lock);
@@ -371,21 +370,17 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
WARN_ON(!sk_unhashed(sk));
__sk_nulls_add_node_rcu(sk, &head->chain);
if (tw) {
- twrefcnt = inet_twsk_unhash(tw);
+ sk_nulls_del_node_init_rcu((struct sock *)tw);
NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
}
spin_unlock(lock);
- if (twrefcnt)
- inet_twsk_put(tw);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
if (twp) {
*twp = tw;
} else if (tw) {
/* Silly. Should hash-dance instead... */
- inet_twsk_deschedule(tw);
-
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
}
return 0;
@@ -403,13 +398,12 @@ static u32 inet_sk_port_offset(const struct sock *sk)
inet->inet_dport);
}
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct hlist_nulls_head *list;
struct inet_ehash_bucket *head;
spinlock_t *lock;
- int twrefcnt = 0;
WARN_ON(!sk_unhashed(sk));
@@ -420,23 +414,22 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
spin_lock(lock);
__sk_nulls_add_node_rcu(sk, list);
- if (tw) {
- WARN_ON(sk->sk_hash != tw->tw_hash);
- twrefcnt = inet_twsk_unhash(tw);
+ if (osk) {
+ WARN_ON(sk->sk_hash != osk->sk_hash);
+ sk_nulls_del_node_init_rcu(osk);
}
spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_listen_hashbucket *ilb;
if (sk->sk_state != TCP_LISTEN)
- return __inet_hash_nolisten(sk, tw);
+ return __inet_hash_nolisten(sk, osk);
WARN_ON(!sk_unhashed(sk));
ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -445,7 +438,6 @@ int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
__sk_nulls_add_node_rcu(sk, &ilb->head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
spin_unlock(&ilb->lock);
- return 0;
}
EXPORT_SYMBOL(__inet_hash);
@@ -492,7 +484,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct inet_bind_bucket *tb;
int ret;
struct net *net = sock_net(sk);
- int twrefcnt = 1;
if (!snum) {
int i, remaining, low, high, port;
@@ -560,19 +551,14 @@ ok:
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
- twrefcnt += __inet_hash_nolisten(sk, tw);
+ __inet_hash_nolisten(sk, (struct sock *)tw);
}
if (tw)
- twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+ inet_twsk_bind_unhash(tw, hinfo);
spin_unlock(&head->lock);
- if (tw) {
- inet_twsk_deschedule(tw);
- while (twrefcnt) {
- twrefcnt--;
- inet_twsk_put(tw);
- }
- }
+ if (tw)
+ inet_twsk_deschedule_put(tw);
ret = 0;
goto out;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2ffbd16b79e0..ae22cc24fbe8 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -18,28 +18,6 @@
/**
- * inet_twsk_unhash - unhash a timewait socket from established hash
- * @tw: timewait socket
- *
- * unhash a timewait socket from established hash, if hashed.
- * ehash lock must be held by caller.
- * Returns 1 if caller should call inet_twsk_put() after lock release.
- */
-int inet_twsk_unhash(struct inet_timewait_sock *tw)
-{
- if (hlist_nulls_unhashed(&tw->tw_node))
- return 0;
-
- hlist_nulls_del_rcu(&tw->tw_node);
- sk_nulls_node_init(&tw->tw_node);
- /*
- * We cannot call inet_twsk_put() ourself under lock,
- * caller must call it for us.
- */
- return 1;
-}
-
-/**
* inet_twsk_bind_unhash - unhash a timewait socket from bind hash
* @tw: timewait socket
* @hashinfo: hashinfo pointer
@@ -48,35 +26,29 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
* bind hash lock must be held by caller.
* Returns 1 if caller should call inet_twsk_put() after lock release.
*/
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo)
{
struct inet_bind_bucket *tb = tw->tw_tb;
if (!tb)
- return 0;
+ return;
__hlist_del(&tw->tw_bind_node);
tw->tw_tb = NULL;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
- /*
- * We cannot call inet_twsk_put() ourself under lock,
- * caller must call it for us.
- */
- return 1;
+ __sock_put((struct sock *)tw);
}
/* Must be called with locally disabled BHs. */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
- struct inet_bind_hashbucket *bhead;
- int refcnt;
- /* Unlink from established hashes. */
spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+ struct inet_bind_hashbucket *bhead;
spin_lock(lock);
- refcnt = inet_twsk_unhash(tw);
+ sk_nulls_del_node_init_rcu((struct sock *)tw);
spin_unlock(lock);
/* Disassociate with bind bucket. */
@@ -84,11 +56,9 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
hashinfo->bhash_size)];
spin_lock(&bhead->lock);
- refcnt += inet_twsk_bind_unhash(tw, hashinfo);
+ inet_twsk_bind_unhash(tw, hashinfo);
spin_unlock(&bhead->lock);
- BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
- atomic_sub(refcnt, &tw->tw_refcnt);
atomic_dec(&tw->tw_dr->tw_count);
inet_twsk_put(tw);
}
@@ -235,13 +205,17 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
* tcp_input.c to verify this.
*/
-/* This is for handling early-kills of TIME_WAIT sockets. */
-void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+/* This is for handling early-kills of TIME_WAIT sockets.
+ * Warning: consumes a reference.
+ * Caller should not access tw anymore.
+ */
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
if (del_timer_sync(&tw->tw_timer))
inet_twsk_kill(tw);
+ inet_twsk_put(tw);
}
-EXPORT_SYMBOL(inet_twsk_deschedule);
+EXPORT_SYMBOL(inet_twsk_deschedule_put);
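The consolidated helper also makes the refcount contract explicit: the caller's reference is consumed unconditionally, while the timer's reference is dropped only if the timer was still pending. A toy userspace sketch of that contract (simplified types and names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct tw_sock {
	atomic_int refcnt;      /* references held on this socket */
	bool timer_pending;     /* stands in for the kernel timer */
};

static void tw_put(struct tw_sock *tw)
{
	/* Drop one reference and free on the last one. */
	if (atomic_fetch_sub(&tw->refcnt, 1) == 1)
		free(tw);
}

/* Consumes the caller's reference; tw must not be touched afterwards. */
static void tw_deschedule_put(struct tw_sock *tw)
{
	if (tw->timer_pending) {
		tw->timer_pending = false;
		tw_put(tw);     /* the reference the timer was holding */
	}
	tw_put(tw);             /* the caller's reference */
}

int main(void)
{
	struct tw_sock *tw = malloc(sizeof(*tw));

	if (!tw)
		return 1;
	atomic_init(&tw->refcnt, 2);   /* one for the caller, one for the timer */
	tw->timer_pending = true;
	tw_deschedule_put(tw);         /* both references dropped, tw is freed */
	return 0;
}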
void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
{
@@ -311,9 +285,8 @@ restart:
rcu_read_unlock();
local_bh_disable();
- inet_twsk_deschedule(tw);
+ inet_twsk_deschedule_put(tw);
local_bh_enable();
- inet_twsk_put(tw);
goto restart_rcu;
}
/* If the nulls value we got at the end of this lookup is
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 921138f6c97c..d96722ae8979 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -522,7 +522,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
int len;
int ihlen;
int err;
- int sum_truesize;
u8 ecn;
ipq_kill(qp);
@@ -590,32 +589,19 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
add_frag_mem_limit(qp->q.net, clone->truesize);
}
+ skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- sum_truesize = head->truesize;
- for (fp = head->next; fp;) {
- bool headstolen;
- int delta;
- struct sk_buff *next = fp->next;
-
- sum_truesize += fp->truesize;
+ for (fp = head->next; fp; fp = fp->next) {
+ head->data_len += fp->len;
+ head->len += fp->len;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
-
- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
- kfree_skb_partial(fp, headstolen);
- } else {
- if (!skb_shinfo(head)->frag_list)
- skb_shinfo(head)->frag_list = fp;
- head->data_len += fp->len;
- head->len += fp->len;
- head->truesize += fp->truesize;
- }
- fp = next;
+ head->truesize += fp->truesize;
}
- sub_frag_mem_limit(qp->q.net, sum_truesize);
+ sub_frag_mem_limit(qp->q.net, head->truesize);
head->next = NULL;
head->dev = dev;
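When every fragment arrives with CHECKSUM_COMPLETE, the loop above keeps the head's checksum valid by folding each fragment's checksum in with one's-complement addition. A standalone illustration of that fold (csum_add() here is a plain-C stand-in for the kernel's __wsum helper):

#include <stdint.h>
#include <stdio.h>

/* One's-complement 32-bit add with end-around carry. */
static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	return res + (res < addend);   /* carry wraps back into bit 0 */
}

int main(void)
{
	/* Pretend these are the partial checksums of three fragments. */
	uint32_t frag_csums[] = { 0x0000ffff, 0x12345678, 0x9abcdef0 };
	uint32_t head = 0;

	for (unsigned int i = 0; i < 3; i++)
		head = csum_add(head, frag_csums[i]);
	printf("combined csum: 0x%08x\n", head);
	return 0;
}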
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5fd706473c73..fb44d693796e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -25,6 +25,7 @@
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
@@ -47,6 +48,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
+#include <net/dst_metadata.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -121,8 +123,127 @@ static int ipgre_tunnel_init(struct net_device *dev);
static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;
-static int ipgre_err(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi)
+static int ip_gre_calc_hlen(__be16 o_flags)
+{
+ int addend = 4;
+
+ if (o_flags & TUNNEL_CSUM)
+ addend += 4;
+ if (o_flags & TUNNEL_KEY)
+ addend += 4;
+ if (o_flags & TUNNEL_SEQ)
+ addend += 4;
+ return addend;
+}
+
+static __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+ __be16 tflags = 0;
+
+ if (flags & GRE_CSUM)
+ tflags |= TUNNEL_CSUM;
+ if (flags & GRE_ROUTING)
+ tflags |= TUNNEL_ROUTING;
+ if (flags & GRE_KEY)
+ tflags |= TUNNEL_KEY;
+ if (flags & GRE_SEQ)
+ tflags |= TUNNEL_SEQ;
+ if (flags & GRE_STRICT)
+ tflags |= TUNNEL_STRICT;
+ if (flags & GRE_REC)
+ tflags |= TUNNEL_REC;
+ if (flags & GRE_VERSION)
+ tflags |= TUNNEL_VERSION;
+
+ return tflags;
+}
+
+static __be16 tnl_flags_to_gre_flags(__be16 tflags)
+{
+ __be16 flags = 0;
+
+ if (tflags & TUNNEL_CSUM)
+ flags |= GRE_CSUM;
+ if (tflags & TUNNEL_ROUTING)
+ flags |= GRE_ROUTING;
+ if (tflags & TUNNEL_KEY)
+ flags |= GRE_KEY;
+ if (tflags & TUNNEL_SEQ)
+ flags |= GRE_SEQ;
+ if (tflags & TUNNEL_STRICT)
+ flags |= GRE_STRICT;
+ if (tflags & TUNNEL_REC)
+ flags |= GRE_REC;
+ if (tflags & TUNNEL_VERSION)
+ flags |= GRE_VERSION;
+
+ return flags;
+}
+
+static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ bool *csum_err)
+{
+ const struct gre_base_hdr *greh;
+ __be32 *options;
+ int hdr_len;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+ return -EINVAL;
+
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+ return -EINVAL;
+
+ tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+ hdr_len = ip_gre_calc_hlen(tpi->flags);
+
+ if (!pskb_may_pull(skb, hdr_len))
+ return -EINVAL;
+
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ tpi->proto = greh->protocol;
+
+ options = (__be32 *)(greh + 1);
+ if (greh->flags & GRE_CSUM) {
+ if (skb_checksum_simple_validate(skb)) {
+ *csum_err = true;
+ return -EINVAL;
+ }
+
+ skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+ null_compute_pseudo);
+ options++;
+ }
+
+ if (greh->flags & GRE_KEY) {
+ tpi->key = *options;
+ options++;
+ } else {
+ tpi->key = 0;
+ }
+ if (unlikely(greh->flags & GRE_SEQ)) {
+ tpi->seq = *options;
+ options++;
+ } else {
+ tpi->seq = 0;
+ }
+ /* WCCP version 1 and 2 protocol decoding.
+ * - Change protocol to IP
+ * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
+ */
+ if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+ tpi->proto = htons(ETH_P_IP);
+ if ((*(u8 *)options & 0xF0) != 0x40) {
+ hdr_len += 4;
+ if (!pskb_may_pull(skb, hdr_len))
+ return -EINVAL;
+ }
+ }
+ return iptunnel_pull_header(skb, hdr_len, tpi->proto);
+}
+
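parse_gre_header() has to size the header before it can read it: the base header is 4 bytes and each flagged option (checksum, key, sequence) appends another 4-byte word, in that fixed order. A compilable sketch of the same sizing and option walk (flag values and buffer contents are illustrative, not the on-wire GRE bits):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GRE_CSUM 0x8000   /* illustrative flag values in host order */
#define GRE_KEY  0x2000
#define GRE_SEQ  0x1000

/* Base header is 4 bytes; each present option adds another 4. */
static int gre_hlen(uint16_t flags)
{
	int len = 4;

	if (flags & GRE_CSUM) len += 4;
	if (flags & GRE_KEY)  len += 4;
	if (flags & GRE_SEQ)  len += 4;
	return len;
}

int main(void)
{
	uint16_t flags = GRE_CSUM | GRE_KEY;
	const uint8_t hdr[12] = { 0 };   /* stand-in packet bytes */
	const uint8_t *opt = hdr + 4;    /* options follow the base header */
	uint32_t key = 0;

	if (flags & GRE_CSUM)
		opt += 4;                /* skip checksum + reserved word */
	if (flags & GRE_KEY) {
		memcpy(&key, opt, 4);    /* key is the next 32-bit word */
		opt += 4;
	}
	printf("header length %d, key 0x%08x\n", gre_hlen(flags), key);
	return 0;
}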
+static void ipgre_err(struct sk_buff *skb, u32 info,
+ const struct tnl_ptk_info *tpi)
{
/* All the routers (except for Linux) return only
@@ -148,14 +269,14 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
switch (type) {
default:
case ICMP_PARAMETERPROB:
- return PACKET_RCVD;
+ return;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
case ICMP_PORT_UNREACH:
/* Impossible event. */
- return PACKET_RCVD;
+ return;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -164,9 +285,10 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
break;
}
break;
+
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
- return PACKET_RCVD;
+ return;
break;
case ICMP_REDIRECT:
@@ -183,26 +305,85 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
iph->daddr, iph->saddr, tpi->key);
if (!t)
- return PACKET_REJECT;
+ return;
if (t->parms.iph.daddr == 0 ||
ipv4_is_multicast(t->parms.iph.daddr))
- return PACKET_RCVD;
+ return;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
- return PACKET_RCVD;
+ return;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
- return PACKET_RCVD;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+ /* All the routers (except for Linux) return only
+ * 8 bytes of packet payload. It means, that precise relaying of
+ * ICMP in the real Internet is absolutely infeasible.
+ *
+ * Moreover, Cisco "wise men" put GRE key to the third word
+ * in GRE header. It makes impossible maintaining even soft
+ * state for keyed GRE tunnels with enabled checksum. Tell them "thank you".
+ *
+ * Well, I wonder, rfc1812 was written by Cisco employee,
+ * what the hell these idiots break standards established
+ * by themselves???
+ */
+
+ const int type = icmp_hdr(skb)->type;
+ const int code = icmp_hdr(skb)->code;
+ struct tnl_ptk_info tpi;
+ bool csum_err = false;
+
+ if (parse_gre_header(skb, &tpi, &csum_err)) {
+ if (!csum_err) /* ignore csum errors. */
+ return;
+ }
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ skb->dev->ifindex, 0, IPPROTO_GRE, 0);
+ return;
+ }
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
+ IPPROTO_GRE, 0);
+ return;
+ }
+
+ ipgre_err(skb, info, &tpi);
+}
+
+static __be64 key_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)((__force u32)key);
+#else
+ return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static __be32 tunnel_id_to_key(__be64 x)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)x;
+#else
+ return (__force __be32)((__force u64)x >> 32);
+#endif
}
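On a little-endian host the 32-bit big-endian key has to land in the upper half of the 64-bit id so that, viewed as raw bytes, it occupies the low-order end of a big-endian 64-bit value. A round-trip sketch of the little-endian branch in plain C (suffixed _le to mark the assumption):

#include <stdint.h>
#include <stdio.h>

/* Little-endian host: shift the key into the high 32 bits so its bytes
 * sit where a big-endian 64-bit tunnel id keeps its low 32 bits.
 */
static uint64_t key_to_tunnel_id_le(uint32_t key)
{
	return (uint64_t)key << 32;
}

static uint32_t tunnel_id_to_key_le(uint64_t id)
{
	return (uint32_t)(id >> 32);
}

int main(void)
{
	uint32_t key = 0xdeadbeef;
	uint64_t id = key_to_tunnel_id_le(key);

	printf("id: 0x%016llx, round-trip key: 0x%08x\n",
	       (unsigned long long)id, tunnel_id_to_key_le(id));
	return 0;
}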
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
struct net *net = dev_net(skb->dev);
+ struct metadata_dst *tun_dst = NULL;
struct ip_tunnel_net *itn;
const struct iphdr *iph;
struct ip_tunnel *tunnel;
@@ -218,40 +399,194 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
if (tunnel) {
skb_pop_mac_header(skb);
- ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
+ if (tunnel->collect_md) {
+ struct ip_tunnel_info *info;
+
+ tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);
+ if (!tun_dst)
+ return PACKET_REJECT;
+
+ info = &tun_dst->u.tun_info;
+ info->key.ipv4_src = iph->saddr;
+ info->key.ipv4_dst = iph->daddr;
+ info->key.ipv4_tos = iph->tos;
+ info->key.ipv4_ttl = iph->ttl;
+
+ info->mode = IP_TUNNEL_INFO_RX;
+ info->key.tun_flags = tpi->flags &
+ (TUNNEL_CSUM | TUNNEL_KEY);
+ info->key.tun_id = key_to_tunnel_id(tpi->key);
+
+ info->key.tp_src = 0;
+ info->key.tp_dst = 0;
+ }
+
+ ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
return PACKET_RCVD;
}
return PACKET_REJECT;
}
+static int gre_rcv(struct sk_buff *skb)
+{
+ struct tnl_ptk_info tpi;
+ bool csum_err = false;
+
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+ /* Looped back packet, drop it! */
+ if (rt_is_output_route(skb_rtable(skb)))
+ goto drop;
+ }
+#endif
+
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+ goto drop;
+
+ if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+ return 0;
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
+ __be16 proto, __be32 key, __be32 seq)
+{
+ struct gre_base_hdr *greh;
+
+ skb_push(skb, hdr_len);
+
+ skb_reset_transport_header(skb);
+ greh = (struct gre_base_hdr *)skb->data;
+ greh->flags = tnl_flags_to_gre_flags(flags);
+ greh->protocol = proto;
+
+ if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+ if (flags & TUNNEL_SEQ) {
+ *ptr = seq;
+ ptr--;
+ }
+ if (flags & TUNNEL_KEY) {
+ *ptr = key;
+ ptr--;
+ }
+ if (flags & TUNNEL_CSUM &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+ *ptr = 0;
+ *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
+ skb->len, 0));
+ }
+ }
+}
+
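build_header() fills the option words from the back of the header forwards: sequence first (the last word), then key, then checksum, which yields the mandated on-wire order checksum, key, sequence. A sketch of that backward walk (the flag bits here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define F_CSUM 0x1   /* illustrative flag bits, not the kernel's values */
#define F_KEY  0x2
#define F_SEQ  0x4

/* Write options back-to-front from the end of the header, as
 * build_header() does; the result is on-wire order csum, key, seq.
 */
static void fill_options(uint32_t *hdr_end, unsigned int flags,
			 uint32_t key, uint32_t seq)
{
	uint32_t *ptr = hdr_end - 1;   /* last 32-bit word of the header */

	if (flags & F_SEQ)
		*ptr-- = seq;
	if (flags & F_KEY)
		*ptr-- = key;
	if (flags & F_CSUM)
		*ptr = 0;              /* checksum is computed later */
}

int main(void)
{
	uint32_t hdr[4] = { 0 };       /* base word + up to three options */

	fill_options(hdr + 4, F_CSUM | F_KEY | F_SEQ, 0x11111111, 0x22222222);
	printf("csum=0x%08x key=0x%08x seq=0x%08x\n", hdr[1], hdr[2], hdr[3]);
	return 0;
}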
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct tnl_ptk_info tpi;
- tpi.flags = tunnel->parms.o_flags;
- tpi.proto = proto;
- tpi.key = tunnel->parms.o_key;
if (tunnel->parms.o_flags & TUNNEL_SEQ)
tunnel->o_seqno++;
- tpi.seq = htonl(tunnel->o_seqno);
/* Push GRE header. */
- gre_build_header(skb, &tpi, tunnel->tun_hlen);
-
- skb_set_inner_protocol(skb, tpi.proto);
+ build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+ proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+ skb_set_inner_protocol(skb, proto);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
+static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+ bool csum)
+{
+ return iptunnel_handle_offloads(skb, csum,
+ csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+}
+
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ip_tunnel_info *tun_info;
+ struct net *net = dev_net(dev);
+ const struct ip_tunnel_key *key;
+ struct flowi4 fl;
+ struct rtable *rt;
+ int min_headroom;
+ int tunnel_hlen;
+ __be16 df, flags;
+ int err;
+
+ tun_info = skb_tunnel_info(skb, AF_INET);
+ if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
+ goto err_free_skb;
+
+ key = &tun_info->key;
+ memset(&fl, 0, sizeof(fl));
+ fl.daddr = key->ipv4_dst;
+ fl.saddr = key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(key->ipv4_tos);
+ fl.flowi4_mark = skb->mark;
+ fl.flowi4_proto = IPPROTO_GRE;
+
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt))
+ goto err_free_skb;
+
+ tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + tunnel_hlen + sizeof(struct iphdr);
+ if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+ int head_delta = SKB_DATA_ALIGN(min_headroom -
+ skb_headroom(skb) +
+ 16);
+ err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+ 0, GFP_ATOMIC);
+ if (unlikely(err))
+ goto err_free_rt;
+ }
+
+ /* Push Tunnel header. */
+ skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
+ if (IS_ERR(skb)) {
+ skb = NULL;
+ goto err_free_rt;
+ }
+
+ flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+ build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+ tunnel_id_to_key(tun_info->key.tun_id), 0);
+
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
+ key->ipv4_dst, IPPROTO_GRE,
+ key->ipv4_tos, key->ipv4_ttl, df, false);
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ return;
+
+err_free_rt:
+ ip_rt_put(rt);
+err_free_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+}
+
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
+ if (tunnel->collect_md) {
+ gre_fb_xmit(skb, dev);
+ return NETDEV_TX_OK;
+ }
+
if (dev->header_ops) {
/* Need space for new headers */
if (skb_cow_head(skb, dev->needed_headroom -
@@ -277,7 +612,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
goto out;
__gre_xmit(skb, dev, tnl_params, skb->protocol);
-
return NETDEV_TX_OK;
free_skb:
@@ -292,6 +626,11 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ if (tunnel->collect_md) {
+ gre_fb_xmit(skb, dev);
+ return NETDEV_TX_OK;
+ }
+
skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
if (IS_ERR(skb))
goto out;
@@ -300,7 +639,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
goto free_skb;
__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
-
return NETDEV_TX_OK;
free_skb:
@@ -530,10 +868,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
return ip_tunnel_init(dev);
}
-static struct gre_cisco_protocol ipgre_protocol = {
- .handler = ipgre_rcv,
- .err_handler = ipgre_err,
- .priority = 0,
+static const struct gre_protocol ipgre_protocol = {
+ .handler = gre_rcv,
+ .err_handler = gre_err,
};
static int __net_init ipgre_init_net(struct net *net)
@@ -596,8 +933,10 @@ out:
return ipgre_tunnel_validate(tb, data);
}
-static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
- struct ip_tunnel_parm *parms)
+static void ipgre_netlink_parms(struct net_device *dev,
+ struct nlattr *data[],
+ struct nlattr *tb[],
+ struct ip_tunnel_parm *parms)
{
memset(parms, 0, sizeof(*parms));
@@ -635,6 +974,12 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
parms->iph.frag_off = htons(IP_DF);
+
+ if (data[IFLA_GRE_COLLECT_METADATA]) {
+ struct ip_tunnel *t = netdev_priv(dev);
+
+ t->collect_md = true;
+ }
}
/* This function returns true when ENCAP attributes are present in the nl msg */
@@ -712,7 +1057,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev,
return err;
}
- ipgre_netlink_parms(data, tb, &p);
+ ipgre_netlink_parms(dev, data, tb, &p);
return ip_tunnel_newlink(dev, tb, &p);
}
@@ -730,7 +1075,7 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- ipgre_netlink_parms(data, tb, &p);
+ ipgre_netlink_parms(dev, data, tb, &p);
return ip_tunnel_changelink(dev, tb, &p);
}
@@ -765,6 +1110,8 @@ static size_t ipgre_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_GRE_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_GRE_COLLECT_METADATA */
+ nla_total_size(0) +
0;
}
@@ -796,6 +1143,11 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
t->encap.flags))
goto nla_put_failure;
+ if (t->collect_md) {
+ if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+ goto nla_put_failure;
+ }
+
return 0;
nla_put_failure:
@@ -817,6 +1169,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
@@ -849,9 +1202,38 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
.get_link_net = ip_tunnel_get_link_net,
};
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+ u8 name_assign_type)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ struct ip_tunnel *t;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &ipgre_tap_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ /* Configure flow based GRE device. */
+ t = netdev_priv(dev);
+ t->collect_md = true;
+
+ err = ipgre_newlink(net, dev, tb, NULL);
+ if (err < 0)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
+
static int __net_init ipgre_tap_init_net(struct net *net)
{
- return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL);
+ return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
static void __net_exit ipgre_tap_exit_net(struct net *net)
@@ -881,7 +1263,7 @@ static int __init ipgre_init(void)
if (err < 0)
goto pnet_tap_failed;
- err = gre_cisco_register(&ipgre_protocol);
+ err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
goto add_proto_failed;
@@ -900,7 +1282,7 @@ static int __init ipgre_init(void)
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
- gre_cisco_unregister(&ipgre_protocol);
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
@@ -912,7 +1294,7 @@ static void __exit ipgre_fini(void)
{
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
- gre_cisco_unregister(&ipgre_protocol);
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
unregister_pernet_device(&ipgre_tap_net_ops);
unregister_pernet_device(&ipgre_net_ops);
}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 2db4c8773c1b..f4fc8a77aaa7 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,6 +146,7 @@
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
+#include <net/dst_metadata.h>
/*
* Process Router Attention IP option (RFC 2113)
@@ -331,7 +332,7 @@ static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
*/
- if (!skb_dst(skb)) {
+ if (!skb_valid_dst(skb)) {
int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
iph->tos, skb->dev);
if (unlikely(err)) {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 626d9e56a6bd..cbb51f3fac06 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -230,10 +230,13 @@ skip_key_lookup:
if (cand)
return cand;
+ t = rcu_dereference(itn->collect_md_tun);
+ if (t)
+ return t;
+
if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
return netdev_priv(itn->fb_tunnel_dev);
-
return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
@@ -261,11 +264,15 @@ static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
struct hlist_head *head = ip_bucket(itn, &t->parms);
+ if (t->collect_md)
+ rcu_assign_pointer(itn->collect_md_tun, t);
hlist_add_head_rcu(&t->hash_node, head);
}
-static void ip_tunnel_del(struct ip_tunnel *t)
+static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
+ if (t->collect_md)
+ rcu_assign_pointer(itn->collect_md_tun, NULL);
hlist_del_init_rcu(&t->hash_node);
}
@@ -419,7 +426,8 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
}
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
- const struct tnl_ptk_info *tpi, bool log_ecn_error)
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error)
{
struct pcpu_sw_netstats *tstats;
const struct iphdr *iph = ip_hdr(skb);
@@ -478,6 +486,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
skb->dev = tunnel->dev;
}
+ if (tun_dst)
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
@@ -806,7 +817,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
struct ip_tunnel_parm *p,
bool set_mtu)
{
- ip_tunnel_del(t);
+ ip_tunnel_del(itn, t);
t->parms.iph.saddr = p->iph.saddr;
t->parms.iph.daddr = p->iph.daddr;
t->parms.i_key = p->i_key;
@@ -967,7 +978,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
if (itn->fb_tunnel_dev != dev) {
- ip_tunnel_del(netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
unregister_netdevice_queue(dev, head);
}
}
@@ -1072,8 +1083,13 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
nt = netdev_priv(dev);
itn = net_generic(net, nt->ip_tnl_net_id);
- if (ip_tunnel_find(itn, p, dev->type))
- return -EEXIST;
+ if (nt->collect_md) {
+ if (rtnl_dereference(itn->collect_md_tun))
+ return -EEXIST;
+ } else {
+ if (ip_tunnel_find(itn, p, dev->type))
+ return -EEXIST;
+ }
nt->net = net;
nt->parms = *p;
@@ -1089,7 +1105,6 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = mtu;
ip_tunnel_add(itn, nt);
-
out:
return err;
}
@@ -1163,6 +1178,10 @@ int ip_tunnel_init(struct net_device *dev)
iph->version = 4;
iph->ihl = 5;
+ if (tunnel->collect_md) {
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ netif_keep_dst(dev);
+ }
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1176,7 +1195,7 @@ void ip_tunnel_uninit(struct net_device *dev)
itn = net_generic(net, tunnel->ip_tnl_net_id);
/* fb_tunnel_dev will be unregistered in net-exit call. */
if (itn->fb_tunnel_dev != dev)
- ip_tunnel_del(netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
ip_tunnel_dst_reset_all(tunnel);
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6a51a71a6c67..5512f4e4ec1b 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -32,6 +32,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/static_key.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -190,3 +191,123 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
+static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = {
+ [IP_TUN_ID] = { .type = NLA_U64 },
+ [IP_TUN_DST] = { .type = NLA_U32 },
+ [IP_TUN_SRC] = { .type = NLA_U32 },
+ [IP_TUN_TTL] = { .type = NLA_U8 },
+ [IP_TUN_TOS] = { .type = NLA_U8 },
+ [IP_TUN_SPORT] = { .type = NLA_U16 },
+ [IP_TUN_DPORT] = { .type = NLA_U16 },
+ [IP_TUN_FLAGS] = { .type = NLA_U16 },
+};
+
+static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+ struct lwtunnel_state **ts)
+{
+ struct ip_tunnel_info *tun_info;
+ struct lwtunnel_state *new_state;
+ struct nlattr *tb[IP_TUN_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
+ if (err < 0)
+ return err;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ if (!new_state)
+ return -ENOMEM;
+
+ new_state->type = LWTUNNEL_ENCAP_IP;
+
+ tun_info = lwt_tun_info(new_state);
+
+ if (tb[IP_TUN_ID])
+ tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);
+
+ if (tb[IP_TUN_DST])
+ tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);
+
+ if (tb[IP_TUN_SRC])
+ tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);
+
+ if (tb[IP_TUN_TTL])
+ tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);
+
+ if (tb[IP_TUN_TOS])
+ tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);
+
+ if (tb[IP_TUN_SPORT])
+ tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);
+
+ if (tb[IP_TUN_DPORT])
+ tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);
+
+ if (tb[IP_TUN_FLAGS])
+ tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);
+
+ tun_info->mode = IP_TUNNEL_INFO_TX;
+ tun_info->options = NULL;
+ tun_info->options_len = 0;
+
+ *ts = new_state;
+
+ return 0;
+}
+
+static int ip_tun_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+ if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
+ nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
+ nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
+ nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
+ nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
+ nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
+ nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
+ nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ return nla_total_size(8) /* IP_TUN_ID */
+ + nla_total_size(4) /* IP_TUN_DST */
+ + nla_total_size(4) /* IP_TUN_SRC */
+ + nla_total_size(1) /* IP_TUN_TOS */
+ + nla_total_size(1) /* IP_TUN_TTL */
+ + nla_total_size(2) /* IP_TUN_SPORT */
+ + nla_total_size(2) /* IP_TUN_DPORT */
+ + nla_total_size(2); /* IP_TUN_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+ .build_state = ip_tun_build_state,
+ .fill_encap = ip_tun_fill_encap_info,
+ .get_encap_size = ip_tun_encap_nlsize,
+};
+
+void __init ip_tunnel_core_init(void)
+{
+ lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+}
+
+struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
+
+void ip_tunnel_need_metadata(void)
+{
+ static_key_slow_inc(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
+
+void ip_tunnel_unneed_metadata(void)
+{
+ static_key_slow_dec(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 8e7328c6a390..ed4ef09c2136 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -94,7 +94,7 @@
/* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
#define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */
#define CONF_SEND_RETRIES 6 /* Send six requests per open */
-#define CONF_INTER_TIMEOUT (HZ/2) /* Inter-device timeout: 1/2 second */
+#define CONF_INTER_TIMEOUT (HZ) /* Inter-device timeout: 1 second */
#define CONF_BASE_TIMEOUT (HZ*2) /* Initial timeout: 2 seconds */
#define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */
#define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 254238daf58b..f34c31defafe 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -198,7 +198,7 @@ static int ipip_rcv(struct sk_buff *skb)
goto drop;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
- return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
}
return -1;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 92305a1a021a..c416cb355cb0 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -240,7 +240,7 @@ get_entry(const void *base, unsigned int offset)
return (struct arpt_entry *)(base + offset);
}
-static inline __pure
+static inline
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
return (void *)entry + entry->next_offset;
@@ -280,6 +280,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
+ /* No TEE support for arptables, so no need to switch to alternate
+ * stack. All targets that reenter must return absolute verdicts.
+ */
e = get_entry(table_base, private->hook_entry[hook]);
acpar.in = state->in;
@@ -325,11 +328,6 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
if (table_base + v
!= arpt_next_entry(e)) {
-
- if (stackidx >= private->stacksize) {
- verdict = NF_DROP;
- break;
- }
jumpstack[stackidx++] = e;
}
@@ -337,9 +335,6 @@ unsigned int arpt_do_table(struct sk_buff *skb,
continue;
}
- /* Targets which reenter must return
- * abs. verdicts
- */
acpar.target = t->u.kernel.target;
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
@@ -372,10 +367,13 @@ static inline bool unconditional(const struct arpt_arp *arp)
/* Figures out from what hook each rule can be called: returns 0 if
* there are loops. Puts hook bitmask in comefrom.
+ *
+ * Keeps track of largest call depth seen and stores it in newinfo->stacksize.
*/
-static int mark_source_chains(const struct xt_table_info *newinfo,
+static int mark_source_chains(struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
+ unsigned int calldepth, max_calldepth = 0;
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
@@ -391,6 +389,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
/* Set initial back pointer. */
e->counters.pcnt = pos;
+ calldepth = 0;
for (;;) {
const struct xt_standard_target *t
@@ -445,6 +444,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
(entry0 + pos + size);
e->counters.pcnt = pos;
pos += size;
+ if (calldepth > 0)
+ --calldepth;
} else {
int newpos = t->verdict;
@@ -459,6 +460,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
return 0;
}
+ if (entry0 + newpos != arpt_next_entry(e) &&
+ ++calldepth > max_calldepth)
+ max_calldepth = calldepth;
+
/* This is a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
@@ -475,6 +480,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
next:
duprintf("Finished chain %u\n", hook);
}
+ newinfo->stacksize = max_calldepth;
return 1;
}
@@ -664,9 +670,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
break;
++i;
- if (strcmp(arpt_get_target(iter)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
}
duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
if (ret != 0)
@@ -1439,9 +1442,6 @@ static int translate_compat_table(const char *name,
break;
}
++i;
- if (strcmp(arpt_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 6c72fbb7b49e..787f99ed55e2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -276,7 +276,7 @@ static void trace_packet(const struct sk_buff *skb,
}
#endif
-static inline __pure
+static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
return (void *)entry + entry->next_offset;
@@ -296,12 +296,13 @@ ipt_do_table(struct sk_buff *skb,
const char *indev, *outdev;
const void *table_base;
struct ipt_entry *e, **jumpstack;
- unsigned int *stackptr, origptr, cpu;
+ unsigned int stackidx, cpu;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
/* Initialization */
+ stackidx = 0;
ip = ip_hdr(skb);
indev = state->in ? state->in->name : nulldevname;
outdev = state->out ? state->out->name : nulldevname;
@@ -331,13 +332,21 @@ ipt_do_table(struct sk_buff *skb,
smp_read_barrier_depends();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
- stackptr = per_cpu_ptr(private->stackptr, cpu);
- origptr = *stackptr;
+
+ /* Switch to alternate jumpstack if we're being invoked via TEE.
+ * TEE issues XT_CONTINUE verdict on original skb so we must not
+ * clobber the jumpstack.
+ *
+ * For recursion via REJECT or SYNPROXY the stack will be clobbered
+ * but it is no problem since absolute verdict is issued by these.
+ */
+ if (static_key_false(&xt_tee_enabled))
+ jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
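The alternate-stack trick costs one multiply: each CPU's jumpstack allocation is doubled, and a duplicated packet simply indexes into the second half. A minimal sketch of the selection (sizes and storage are illustrative):

#include <stdio.h>

#define STACKSIZE 16

/* Per-cpu jump stack with two halves: the second half is used while
 * processing a duplicated packet so the original traversal's stack
 * is not clobbered.
 */
static void *jumpstack_storage[2 * STACKSIZE];

static void **pick_jumpstack(int skb_duplicated)
{
	return jumpstack_storage + STACKSIZE * skb_duplicated;
}

int main(void)
{
	void **normal = pick_jumpstack(0);
	void **nested = pick_jumpstack(1);

	printf("halves are disjoint: %s\n",
	       nested == normal + STACKSIZE ? "yes" : "no");
	return 0;
}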
e = get_entry(table_base, private->hook_entry[hook]);
- pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
- table->name, hook, origptr,
+ pr_debug("Entering %s(hook %u), UF %p\n",
+ table->name, hook,
get_entry(table_base, private->underflow[hook]));
do {
@@ -383,28 +392,24 @@ ipt_do_table(struct sk_buff *skb,
verdict = (unsigned int)(-v) - 1;
break;
}
- if (*stackptr <= origptr) {
+ if (stackidx == 0) {
e = get_entry(table_base,
private->underflow[hook]);
pr_debug("Underflow (this is normal) "
"to %p\n", e);
} else {
- e = jumpstack[--*stackptr];
+ e = jumpstack[--stackidx];
pr_debug("Pulled %p out from pos %u\n",
- e, *stackptr);
+ e, stackidx);
e = ipt_next_entry(e);
}
continue;
}
if (table_base + v != ipt_next_entry(e) &&
!(e->ip.flags & IPT_F_GOTO)) {
- if (*stackptr >= private->stacksize) {
- verdict = NF_DROP;
- break;
- }
- jumpstack[(*stackptr)++] = e;
+ jumpstack[stackidx++] = e;
pr_debug("Pushed %p into pos %u\n",
- e, *stackptr - 1);
+ e, stackidx - 1);
}
e = get_entry(table_base, v);
@@ -423,9 +428,8 @@ ipt_do_table(struct sk_buff *skb,
/* Verdict */
break;
} while (!acpar.hotdrop);
- pr_debug("Exiting %s; resetting sp from %u to %u\n",
- __func__, *stackptr, origptr);
- *stackptr = origptr;
+ pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
+
xt_write_recseq_end(addend);
local_bh_enable();
@@ -439,11 +443,15 @@ ipt_do_table(struct sk_buff *skb,
}
/* Figures out from what hook each rule can be called: returns 0 if
- there are loops. Puts hook bitmask in comefrom. */
+ * there are loops. Puts hook bitmask in comefrom.
+ *
+ * Keeps track of largest call depth seen and stores it in newinfo->stacksize.
+ */
static int
-mark_source_chains(const struct xt_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
+ unsigned int calldepth, max_calldepth = 0;
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
@@ -457,6 +465,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
/* Set initial back pointer. */
e->counters.pcnt = pos;
+ calldepth = 0;
for (;;) {
const struct xt_standard_target *t
@@ -518,6 +527,9 @@ mark_source_chains(const struct xt_table_info *newinfo,
(entry0 + pos + size);
e->counters.pcnt = pos;
pos += size;
+ WARN_ON_ONCE(calldepth == 0);
+ if (calldepth > 0)
+ --calldepth;
} else {
int newpos = t->verdict;
@@ -531,9 +543,14 @@ mark_source_chains(const struct xt_table_info *newinfo,
newpos);
return 0;
}
+ if (entry0 + newpos != ipt_next_entry(e) &&
+ !(e->ip.flags & IPT_F_GOTO) &&
+ ++calldepth > max_calldepth)
+ max_calldepth = calldepth;
+
/* This is a jump; chase it. */
- duprintf("Jump rule %u -> %u\n",
- pos, newpos);
+ duprintf("Jump rule %u -> %u, calldepth %d\n",
+ pos, newpos, calldepth);
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
@@ -547,6 +564,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
next:
duprintf("Finished chain %u\n", hook);
}
+ newinfo->stacksize = max_calldepth;
return 1;
}
@@ -826,9 +844,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
return ret;
++i;
- if (strcmp(ipt_get_target(iter)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
}
if (i != repl->num_entries) {
@@ -1744,9 +1759,6 @@ translate_compat_table(struct net *net,
if (ret != 0)
break;
++i;
- if (strcmp(ipt_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index c88b7d434718..b69e82bda215 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -49,12 +49,9 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
if (skb->nfct)
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif
-
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ if (nf_bridge_in_prerouting(skb))
return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
-#endif
+
if (hooknum == NF_INET_PRE_ROUTING)
return IP_DEFRAG_CONNTRACK_IN + zone;
else
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 05ff44b758df..e89094ab5ddb 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -363,7 +363,8 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
scoped);
rcu_read_unlock();
- if (!(isk->freebind || isk->transparent || has_addr ||
+ if (!(net->ipv6.sysctl.ip_nonlocal_bind ||
+ isk->freebind || isk->transparent || has_addr ||
addr_type == IPV6_ADDR_ANY))
return -EADDRNOTAVAIL;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index da5d483e236a..3abd9d7a3adf 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -300,6 +300,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+ SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+ SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e681b852ced1..18fd7c9095c7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
@@ -102,6 +103,7 @@
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
+#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
@@ -109,6 +111,7 @@
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
+#include <net/ip_tunnels.h>
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -1355,6 +1358,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
list_del(&rt->rt_uncached);
spin_unlock_bh(&ul->lock);
}
+ lwtstate_put(rt->rt_lwtstate);
}
void rt_flush_dev(struct net_device *dev)
@@ -1403,6 +1407,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
+ rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
if (unlikely(fnhe))
cached = rt_bind_exception(rt, fnhe, daddr);
else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1488,6 +1493,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+ rth->rt_lwtstate = NULL;
if (our) {
rth->dst.input= ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
@@ -1546,7 +1552,6 @@ static int __mkroute_input(struct sk_buff *skb,
struct rtable *rth;
int err;
struct in_device *out_dev;
- unsigned int flags = 0;
bool do_cache;
u32 itag = 0;
@@ -1610,7 +1615,7 @@ static int __mkroute_input(struct sk_buff *skb,
}
rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
- rth->rt_flags = flags;
+ rth->rt_flags = 0;
rth->rt_type = res->type;
rth->rt_is_input = 1;
rth->rt_iif = 0;
@@ -1618,12 +1623,15 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+ rth->rt_lwtstate = NULL;
RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
+ if (lwtunnel_output_redirect(rth->rt_lwtstate))
+ rth->dst.output = lwtunnel_output;
skb_dst_set(skb, &rth->dst);
out:
err = 0;
@@ -1662,6 +1670,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
{
struct fib_result res;
struct in_device *in_dev = __in_dev_get_rcu(dev);
+ struct ip_tunnel_info *tun_info;
struct flowi4 fl4;
unsigned int flags = 0;
u32 itag = 0;
@@ -1679,6 +1688,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
by fib_lookup.
*/
+ tun_info = skb_tunnel_info(skb, AF_INET);
+ if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
+ fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
+ else
+ fl4.flowi4_tun_key.tun_id = 0;
+ skb_dst_drop(skb);
+
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
@@ -1792,6 +1808,8 @@ local_input:
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+ rth->rt_lwtstate = NULL;
+
RT_CACHE_STAT_INC(in_slow_tot);
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
@@ -1981,7 +1999,7 @@ add:
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
-
+ rth->rt_lwtstate = NULL;
RT_CACHE_STAT_INC(out_slow_tot);
if (flags & RTCF_LOCAL)
@@ -2004,6 +2022,8 @@ add:
}
rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
+ if (lwtunnel_output_redirect(rth->rt_lwtstate))
+ rth->dst.output = lwtunnel_output;
return rth;
}
@@ -2261,7 +2281,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_uses_gateway = ort->rt_uses_gateway;
INIT_LIST_HEAD(&rt->rt_uncached);
-
+ rt->rt_lwtstate = NULL;
dst_free(new);
}
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index c037644eafb7..fd1405d37c14 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -146,7 +146,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
bictcp_update(ca, tp->snd_cwnd);
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 8c6fd3d5e40f..167b6a3e1b98 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -264,7 +264,7 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 prior_snd_cwnd;
u32 incr;
- if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+ if (tcp_in_slow_start(tp) && hystart_detect)
tcp_cdg_hystart_update(sk);
if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 84be008c945c..a2ed23c595cf 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -365,10 +365,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
*/
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
- u32 cwnd = tp->snd_cwnd + acked;
+ u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
- if (cwnd > tp->snd_ssthresh)
- cwnd = tp->snd_ssthresh + 1;
acked -= cwnd - tp->snd_cwnd;
tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
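The min() form caps slow-start growth exactly at ssthresh, instead of the old code's ssthresh + 1 overshoot, and the leftover ACK credit falls through to congestion avoidance. A standalone sketch of the same arithmetic:

#include <stdio.h>

/* Grow cwnd by acked, capped exactly at ssthresh; return the ACKs
 * left over for the congestion-avoidance phase.
 */
static unsigned int slow_start(unsigned int *cwnd, unsigned int ssthresh,
			       unsigned int acked)
{
	unsigned int next = *cwnd + acked;

	if (next > ssthresh)
		next = ssthresh;
	acked -= next - *cwnd;
	*cwnd = next;
	return acked;
}

int main(void)
{
	unsigned int cwnd = 8, acked;

	acked = slow_start(&cwnd, 10, 5);   /* grows 8 -> 10, 3 ACKs left */
	printf("cwnd=%u leftover=%u\n", cwnd, acked);
	return 0;
}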
@@ -413,7 +411,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
/* In "safe" area, increase. */
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 06d3d665a9fd..28011fb1f4a2 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -320,7 +320,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
if (hystart && after(ack, ca->end_seq))
bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
@@ -439,7 +439,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
ca->delay_min = delay;
/* hystart triggers when cwnd is larger than some threshold */
- if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+ if (hystart && tcp_in_slow_start(tp) &&
tp->snd_cwnd >= hystart_low_window)
hystart_update(sk, delay);
}
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 882c08aae2f5..db7842495a64 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -116,7 +116,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
/* Update AIMD parameters.
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 58469fff6c18..82f0d9ed60f5 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -236,7 +236,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
/* In dangerous area, increase slowly.
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index f963b274f2b0..083831e359df 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -112,7 +112,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
rho_fractions = ca->rho_3ls - (ca->rho << 3);
- if (tp->snd_cwnd < tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
/*
* slow start
* INC = 2^RHO - 1
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index f71002e4db0b..2ab9bbb6faff 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -268,7 +268,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
/* In slow start */
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 728f5b3d3c64..4e4d6bcd0ca9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -109,6 +109,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
#define FLAG_DATA_SACKED 0x20 /* New SACK. */
#define FLAG_ECE 0x40 /* ECE in this ACK */
+#define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */
#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -196,11 +197,13 @@ static void tcp_enter_quickack_mode(struct sock *sk)
* and the session is not interactive.
*/
-static inline bool tcp_in_quickack_mode(const struct sock *sk)
+static bool tcp_in_quickack_mode(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
- return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
+ return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
+ (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
}
static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
@@ -1037,7 +1040,7 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
* highest SACK block). Also calculate the lowest snd_nxt among the remaining
* retransmitted skbs to avoid some costly processing per ACKs.
*/
-static void tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -1078,7 +1081,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
if (after(received_upto, ack_seq)) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
-
+ *flag |= FLAG_LOST_RETRANS;
tcp_skb_mark_lost_uncond_verify(tp, skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
} else {
@@ -1818,7 +1821,7 @@ advance_sp:
((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
- tcp_mark_lost_retrans(sk);
+ tcp_mark_lost_retrans(sk, &state->flag);
tcp_verify_left_out(tp);
out:
@@ -2474,15 +2477,14 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
return false;
}
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
* It computes the number of packets to send (sndcnt) based on packets newly
* delivered:
* 1) If the packets in flight is larger than ssthresh, PRR spreads the
* cwnd reductions across a full RTT.
- * 2) If packets in flight is lower than ssthresh (such as due to excess
- * losses and/or application stalls), do not perform any further cwnd
- * reductions, but instead slow start up to ssthresh.
+ * 2) Otherwise PRR uses packet conservation to send as much as delivered.
+ * But when the retransmits are acked without further losses, PRR
+ * slow starts cwnd up to ssthresh to speed up the recovery.
*/
static void tcp_init_cwnd_reduction(struct sock *sk)
{
@@ -2499,7 +2501,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
}
static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
- int fast_rexmit)
+ int fast_rexmit, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int sndcnt = 0;
@@ -2508,16 +2510,18 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
(tp->packets_out - tp->sacked_out);
tp->prr_delivered += newly_acked_sacked;
- if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+ if (delta < 0) {
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
tp->prior_cwnd - 1;
sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
- } else {
+ } else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+ !(flag & FLAG_LOST_RETRANS)) {
sndcnt = min_t(int, delta,
max_t(int, tp->prr_delivered - tp->prr_out,
newly_acked_sacked) + 1);
+ } else {
+ sndcnt = min(delta, newly_acked_sacked);
}
-
sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
}
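The three branches above translate directly into RFC 6937's send quota: proportional reduction while flight size exceeds ssthresh, conservation plus slow start when retransmissions were acked without further losses, and plain packet conservation otherwise. A toy-integer sketch (the fast_rexmit floor and tcp_sock plumbing are omitted):

#include <stdio.h>

static int prr_sndcnt(int in_flight, int ssthresh, int prior_cwnd,
		      int prr_delivered, int prr_out,
		      int newly_acked_sacked, int rexmit_acked_clean)
{
	int delta = ssthresh - in_flight;
	int sndcnt;

	if (delta < 0) {
		/* Above ssthresh: reduce in proportion to delivery. */
		long long dividend =
			(long long)ssthresh * prr_delivered + prior_cwnd - 1;
		sndcnt = (int)(dividend / prior_cwnd) - prr_out;
	} else if (rexmit_acked_clean) {
		/* Retransmits acked cleanly: slow start back to ssthresh. */
		int conserved = prr_delivered - prr_out;

		if (conserved < newly_acked_sacked)
			conserved = newly_acked_sacked;
		sndcnt = conserved + 1;
		if (sndcnt > delta)
			sndcnt = delta;
	} else {
		/* Otherwise: send no more than was just delivered. */
		sndcnt = delta < newly_acked_sacked ? delta : newly_acked_sacked;
	}
	return sndcnt > 0 ? sndcnt : 0;
}

int main(void)
{
	/* Above ssthresh: quota tracks delivered packets, not losses. */
	printf("sndcnt=%d\n", prr_sndcnt(20, 10, 40, 12, 2, 4, 0));
	return 0;
}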
@@ -2578,7 +2582,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
} else {
- tcp_cwnd_reduction(sk, prior_unsacked, 0);
+ tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
}
}
@@ -2588,6 +2592,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
icsk->icsk_mtup.probe_size = 0;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
}
static void tcp_mtup_probe_success(struct sock *sk)
@@ -2607,6 +2612,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
icsk->icsk_mtup.probe_size = 0;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
}
/* Do a simple retransmit without using the backoff mechanisms in
@@ -2675,7 +2681,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
tp->prior_ssthresh = 0;
tcp_init_undo(tp);
- if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+ if (!tcp_in_cwnd_reduction(sk)) {
if (!ece_ack)
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tcp_init_cwnd_reduction(sk);
@@ -2735,7 +2741,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
/* Undo during fast recovery after partial ACK. */
static bool tcp_try_undo_partial(struct sock *sk, const int acked,
- const int prior_unsacked)
+ const int prior_unsacked, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2751,7 +2757,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
* mark more packets lost or retransmit more.
*/
if (tp->retrans_out) {
- tcp_cwnd_reduction(sk, prior_unsacked, 0);
+ tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
return true;
}
@@ -2838,7 +2844,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
if (tcp_is_reno(tp) && is_dupack)
tcp_add_reno_sack(sk);
} else {
- if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+ if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
return;
/* Partial ACK arrived. Force fast retransmit. */
do_lost = tcp_is_reno(tp) ||
@@ -2851,9 +2857,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, is_dupack);
- if (icsk->icsk_ca_state != TCP_CA_Open)
+ if (icsk->icsk_ca_state != TCP_CA_Open &&
+ !(flag & FLAG_LOST_RETRANS))
return;
- /* Fall through to processing in Open state. */
+ /* Change state if cwnd is undone or retransmits are lost */
default:
if (tcp_is_reno(tp)) {
if (flag & FLAG_SND_UNA_ADVANCED)
@@ -2888,7 +2895,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
if (do_lost)
tcp_update_scoreboard(sk, fast_rexmit);
- tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+ tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
tcp_xmit_retransmit_queue(sk);
}
@@ -3562,10 +3569,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
&sack_state);
acked -= tp->packets_out;
- /* Advance cwnd if state allows */
- if (tcp_may_raise_cwnd(sk, flag))
- tcp_cong_avoid(sk, ack, acked);
-
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, acked, prior_unsacked,
@@ -3574,6 +3577,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
+ /* Advance cwnd if state allows */
+ if (tcp_may_raise_cwnd(sk, flag))
+ tcp_cong_avoid(sk, ack, acked);
+
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
struct dst_entry *dst = __sk_dst_get(sk);
if (dst)
@@ -3947,7 +3954,6 @@ void tcp_reset(struct sock *sk)
static void tcp_fin(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- const struct dst_entry *dst;
inet_csk_schedule_ack(sk);
@@ -3959,9 +3965,7 @@ static void tcp_fin(struct sock *sk)
case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */
tcp_set_state(sk, TCP_CLOSE_WAIT);
- dst = __sk_dst_get(sk);
- if (!dst || !dst_metric(dst, RTAX_QUICKACK))
- inet_csk(sk)->icsk_ack.pingpong = 1;
+ inet_csk(sk)->icsk_ack.pingpong = 1;
break;
case TCP_CLOSE_WAIT:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0ea2e1c5d395..93898e093d4e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -222,7 +222,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (err)
goto failure;
- inet_set_txhash(sk);
+ sk_set_txhash(sk);
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
@@ -1277,7 +1277,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->rcv_tos = ip_hdr(skb)->tos;
inet_csk(newsk)->icsk_ext_hdr_len = 0;
- inet_set_txhash(newsk);
+ sk_set_txhash(newsk);
if (inet_opt)
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
newinet->inet_id = newtp->write_seq ^ jiffies;
@@ -1683,8 +1683,7 @@ do_time_wait:
iph->daddr, th->dest,
inet_iif(skb));
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk));
- inet_twsk_put(inet_twsk(sk));
+ inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
goto process;
}
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index a51d63a43e33..b3d64f61d922 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -461,7 +461,7 @@ void tcp_update_metrics(struct sock *sk)
tcp_metric_set(tm, TCP_METRIC_CWND,
tp->snd_cwnd);
}
- } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+ } else if (!tcp_in_slow_start(tp) &&
icsk->icsk_ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4bc00cb79e60..6d8795b066ac 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -147,8 +147,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
if (!th->fin ||
TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
- inet_twsk_deschedule(tw);
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
return TCP_TW_RST;
}
@@ -198,8 +197,7 @@ kill_with_rst:
*/
if (sysctl_tcp_rfc1337 == 0) {
kill:
- inet_twsk_deschedule(tw);
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
return TCP_TW_SUCCESS;
}
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b1c218df2c85..7d1efa762b75 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -163,7 +163,6 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
{
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
- const struct dst_entry *dst = __sk_dst_get(sk);
if (sysctl_tcp_slow_start_after_idle &&
(!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
@@ -174,9 +173,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
/* If it is a reply for ato after last received
* packet, enter pingpong mode.
*/
- if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
- (!dst || !dst_metric(dst, RTAX_QUICKACK)))
- icsk->icsk_ack.pingpong = 1;
+ if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+ icsk->icsk_ack.pingpong = 1;
}
/* Account for an ACK we sent. */
@@ -1776,7 +1774,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto send_now;
- if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
+ if (icsk->icsk_ca_state >= TCP_CA_Recovery)
goto send_now;
/* Avoid bursty behavior by allowing defer
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 333bcb2415ff..bf5ea9e9bbc1 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -22,7 +22,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else
tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5b752f58a900..7149ebc820c7 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -649,4 +649,3 @@ void tcp_init_xmit_timers(struct sock *sk)
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
&tcp_keepalive_timer);
}
-EXPORT_SYMBOL(tcp_init_xmit_timers);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a6cea1d5e20d..13951c4087d4 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -225,7 +225,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
*/
diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
- if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (diff > gamma && tcp_in_slow_start(tp)) {
/* Going too fast. Time to slow down
* and switch to congestion avoidance.
*/
@@ -240,7 +240,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
- } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ } else if (tcp_in_slow_start(tp)) {
/* Slow start. */
tcp_slow_start(tp, acked);
} else {
@@ -281,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
vegas->minRTT = 0x7fffffff;
}
/* Use normal slow start */
- else if (tp->snd_cwnd <= tp->snd_ssthresh)
+ else if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
}
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 112151eeee45..0d094b995cd9 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -150,7 +150,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
/* Slow start. */
tcp_slow_start(tp, acked);
} else {
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 438a73aa777c..643f61339e7b 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -5,16 +5,15 @@
# IPv6 as module will cause a CRASH if you try to unload it
menuconfig IPV6
tristate "The IPv6 protocol"
- default m
+ default y
---help---
- This is complemental support for the IP version 6.
- You will still be able to do traditional IPv4 networking as well.
+ Support for IP version 6 (IPv6).
For general information about IPv6, see
<https://en.wikipedia.org/wiki/IPv6>.
- For Linux IPv6 development information, see <http://www.linux-ipv6.org>.
- For specific information about IPv6 under Linux, read the HOWTO at
- <http://www.bieringer.de/linux/IPv6/>.
+ For specific information about IPv6 under Linux, see
+ Documentation/networking/ipv6.txt and read the HOWTO at
+ <http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
To compile this protocol support as a module, choose M here: the
module will be called ipv6.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 21c2c818df3b..53e3a9d756b0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_from_local = 0,
+ .accept_ra_min_hop_limit = 1,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@@ -211,7 +212,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_ra_mtu = 1,
.stable_secret = {
.initialized = false,
- }
+ },
+ .use_oif_addrs_only = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -236,6 +238,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_from_local = 0,
+ .accept_ra_min_hop_limit = 1,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@@ -253,6 +256,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.stable_secret = {
.initialized = false,
},
+ .use_oif_addrs_only = 0,
};
/* Check if a valid qdisc is available */
@@ -1358,15 +1362,96 @@ out:
return ret;
}
+static int __ipv6_dev_get_saddr(struct net *net,
+ struct ipv6_saddr_dst *dst,
+ struct inet6_dev *idev,
+ struct ipv6_saddr_score *scores,
+ int hiscore_idx)
+{
+ struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
+ int i;
+
+ /*
+ * - Tentative Address (RFC2462 section 5.4)
+ * - A tentative address is not considered
+ * "assigned to an interface" in the traditional
+ * sense, unless it is also flagged as optimistic.
+ * - Candidate Source Address (section 4)
+ * - In any case, anycast addresses, multicast
+ * addresses, and the unspecified address MUST
+ * NOT be included in a candidate set.
+ */
+ if ((score->ifa->flags & IFA_F_TENTATIVE) &&
+ (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
+ continue;
+
+ score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+
+ if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
+ score->addr_type & IPV6_ADDR_MULTICAST)) {
+ net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
+ idev->dev->name);
+ continue;
+ }
+
+ score->rule = -1;
+ bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+
+ for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
+ int minihiscore, miniscore;
+
+ minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
+ miniscore = ipv6_get_saddr_eval(net, score, dst, i);
+
+ if (minihiscore > miniscore) {
+ if (i == IPV6_SADDR_RULE_SCOPE &&
+ score->scopedist > 0) {
+ /*
+ * special case:
+ * each remaining entry
+ * has too small (not enough)
+ * scope, because ifa entries
+ * are sorted by their scope
+ * values.
+ */
+ goto out;
+ }
+ break;
+ } else if (minihiscore < miniscore) {
+ if (hiscore->ifa)
+ in6_ifa_put(hiscore->ifa);
+
+ in6_ifa_hold(score->ifa);
+
+ swap(hiscore, score);
+ hiscore_idx = 1 - hiscore_idx;
+
+ /* restore our iterator */
+ score->ifa = hiscore->ifa;
+
+ break;
+ }
+ }
+ }
+out:
+ read_unlock_bh(&idev->lock);
+ return hiscore_idx;
+}
+
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
const struct in6_addr *daddr, unsigned int prefs,
struct in6_addr *saddr)
{
- struct ipv6_saddr_score scores[2],
- *score = &scores[0], *hiscore = &scores[1];
+ struct ipv6_saddr_score scores[2], *hiscore;
struct ipv6_saddr_dst dst;
+ struct inet6_dev *idev;
struct net_device *dev;
int dst_type;
+ bool use_oif_addr = false;
+ int hiscore_idx = 0;
dst_type = __ipv6_addr_type(daddr);
dst.addr = daddr;
@@ -1375,105 +1460,50 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
dst.prefs = prefs;
- hiscore->rule = -1;
- hiscore->ifa = NULL;
+ scores[hiscore_idx].rule = -1;
+ scores[hiscore_idx].ifa = NULL;
rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- struct inet6_dev *idev;
-
- /* Candidate Source Address (section 4)
- * - multicast and link-local destination address,
- * the set of candidate source address MUST only
- * include addresses assigned to interfaces
- * belonging to the same link as the outgoing
- * interface.
- * (- For site-local destination addresses, the
- * set of candidate source addresses MUST only
- * include addresses assigned to interfaces
- * belonging to the same site as the outgoing
- * interface.)
- */
- if (((dst_type & IPV6_ADDR_MULTICAST) ||
- dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
- dst.ifindex && dev->ifindex != dst.ifindex)
- continue;
-
- idev = __in6_dev_get(dev);
- if (!idev)
- continue;
-
- read_lock_bh(&idev->lock);
- list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
- int i;
-
- /*
- * - Tentative Address (RFC2462 section 5.4)
- * - A tentative address is not considered
- * "assigned to an interface" in the traditional
- * sense, unless it is also flagged as optimistic.
- * - Candidate Source Address (section 4)
- * - In any case, anycast addresses, multicast
- * addresses, and the unspecified address MUST
- * NOT be included in a candidate set.
- */
- if ((score->ifa->flags & IFA_F_TENTATIVE) &&
- (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
- continue;
-
- score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+ /* Candidate Source Address (section 4)
+ * - multicast and link-local destination address,
+ * the set of candidate source address MUST only
+ * include addresses assigned to interfaces
+ * belonging to the same link as the outgoing
+ * interface.
+ * (- For site-local destination addresses, the
+ * set of candidate source addresses MUST only
+ * include addresses assigned to interfaces
+ * belonging to the same site as the outgoing
+ * interface.)
+ * - "It is RECOMMENDED that the candidate source addresses
+ * be the set of unicast addresses assigned to the
+ * interface that will be used to send to the destination
+ * (the 'outgoing' interface)." (RFC 6724)
+ */
+ if (dst_dev) {
+ idev = __in6_dev_get(dst_dev);
+ if ((dst_type & IPV6_ADDR_MULTICAST) ||
+ dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+ (idev && idev->cnf.use_oif_addrs_only)) {
+ use_oif_addr = true;
+ }
+ }
- if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
- score->addr_type & IPV6_ADDR_MULTICAST)) {
- net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
- dev->name);
+ if (use_oif_addr) {
+ if (idev)
+ hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+ } else {
+ for_each_netdev_rcu(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
continue;
- }
-
- score->rule = -1;
- bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
-
- for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
- int minihiscore, miniscore;
-
- minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
- miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
-
- if (minihiscore > miniscore) {
- if (i == IPV6_SADDR_RULE_SCOPE &&
- score->scopedist > 0) {
- /*
- * special case:
- * each remaining entry
- * has too small (not enough)
- * scope, because ifa entries
- * are sorted by their scope
- * values.
- */
- goto try_nextdev;
- }
- break;
- } else if (minihiscore < miniscore) {
- if (hiscore->ifa)
- in6_ifa_put(hiscore->ifa);
-
- in6_ifa_hold(score->ifa);
-
- swap(hiscore, score);
-
- /* restore our iterator */
- score->ifa = hiscore->ifa;
-
- break;
- }
- }
+ hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
}
-try_nextdev:
- read_unlock_bh(&idev->lock);
}
rcu_read_unlock();
+ hiscore = &scores[hiscore_idx];
if (!hiscore->ifa)
return -EADDRNOTAVAIL;
@@ -4560,6 +4590,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
+ array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
#ifdef CONFIG_IPV6_ROUTER_PREF
array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
@@ -4586,6 +4617,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
/* we omit DEVCONF_STABLE_SECRET for now */
+ array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
}
static inline size_t inet6_ifla6_size(void)
@@ -5456,6 +5488,13 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec,
},
{
+ .procname = "accept_ra_min_hop_limit",
+ .data = &ipv6_devconf.accept_ra_min_hop_limit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "accept_ra_pinfo",
.data = &ipv6_devconf.accept_ra_pinfo,
.maxlen = sizeof(int),
@@ -5585,6 +5624,14 @@ static struct addrconf_sysctl_table
.proc_handler = addrconf_sysctl_stable_secret,
},
{
+ .procname = "use_oif_addrs_only",
+ .data = &ipv6_devconf.use_oif_addrs_only,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+
+ },
+ {
/* sentinel */
}
},
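
The addrconf rework above has two observable effects: per-device source-address scoring moves into __ipv6_dev_get_saddr(), and the candidate set is restricted to the outgoing interface whenever RFC 6724 demands it (multicast or link-local-scoped destinations) or the new use_oif_addrs_only knob asks for it. A compact sketch of just that restriction decision, with the address-type facts precomputed into a stub:

#include <stdbool.h>

/* Facts the kernel derives from __ipv6_addr_type() and the
 * destination scope; stubbed here. */
struct saddr_dst_stub {
	bool is_multicast;	/* destination is a multicast address */
	bool scope_le_link;	/* destination scope <= link-local */
};

/* Mirrors the new use_oif_addr decision: when true, only addresses on
 * the outgoing interface are scored, instead of walking every netdev. */
static bool restrict_to_oif(const struct saddr_dst_stub *dst,
			    bool have_oif_idev, bool use_oif_addrs_only)
{
	if (!have_oif_idev)
		return false;
	return dst->is_multicast || dst->scope_le_link || use_oif_addrs_only;
}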
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index ca09bf49ac68..bfa941fc1165 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -107,7 +107,16 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
}
EXPORT_SYMBOL(inet6addr_notifier_call_chain);
-const struct ipv6_stub *ipv6_stub __read_mostly;
+static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+ struct dst_entry **u2,
+ struct flowi6 *u3)
+{
+ return -EAFNOSUPPORT;
+}
+
+const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+ .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+};
EXPORT_SYMBOL_GPL(ipv6_stub);
/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7de52b65173f..44bb66bde0e2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -197,6 +197,7 @@ lookup_protocol:
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
+ np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
/* Init the ipv4 part of the socket since we can have sockets
@@ -342,7 +343,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
- if (!(inet->freebind || inet->transparent) &&
+ if (!net->ipv6.sysctl.ip_nonlocal_bind &&
+ !(inet->freebind || inet->transparent) &&
!ipv6_chk_addr(net, &addr->sin6_addr,
dev, 0)) {
err = -EADDRNOTAVAIL;
@@ -679,8 +681,8 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
const struct ipv6_pinfo *np = inet6_sk(sk);
if (np->rxopt.all) {
- if ((opt->hop && (np->rxopt.bits.hopopts ||
- np->rxopt.bits.ohopopts)) ||
+ if (((opt->flags & IP6SKB_HOPBYHOP) &&
+ (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
(ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
np->rxopt.bits.rxflow) ||
(opt->srcrt && (np->rxopt.bits.srcrt ||
@@ -766,10 +768,10 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.bindv6only = 0;
net->ipv6.sysctl.icmpv6_time = 1*HZ;
net->ipv6.sysctl.flowlabel_consistency = 1;
- net->ipv6.sysctl.auto_flowlabels = 0;
+ net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS;
net->ipv6.sysctl.idgen_retries = 3;
net->ipv6.sysctl.idgen_delay = 1 * HZ;
- net->ipv6.sysctl.flowlabel_state_ranges = 1;
+ net->ipv6.sysctl.flowlabel_state_ranges = 0;
atomic_set(&net->ipv6.fib6_sernum, 1);
err = ipv6_init_mibs(net);
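
inet6_bind() above (and rawv6_bind() further down) now consult a per-namespace net.ipv6.ip_nonlocal_bind sysctl before rejecting a bind to an address that is not configured locally, mirroring the long-standing IPv4 knob. A small user-space sketch of what the change permits — the address is documentation space, and the sysctl is assumed to be set to 1:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 a;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	memset(&a, 0, sizeof(a));
	a.sin6_family = AF_INET6;
	a.sin6_port = htons(8080);
	inet_pton(AF_INET6, "2001:db8::1", &a.sin6_addr);
	/* Without the sysctl (or IP_FREEBIND/IP_TRANSPARENT) this fails
	 * with EADDRNOTAVAIL when 2001:db8::1 is not a local address;
	 * with net.ipv6.ip_nonlocal_bind=1 it is expected to succeed. */
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		perror("bind");
	close(fd);
	return 0;
}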
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b10a88986a98..9aadd57808a5 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -199,7 +199,7 @@ ipv4_connected:
NULL);
sk->sk_state = TCP_ESTABLISHED;
- ip6_set_txhash(sk);
+ sk_set_txhash(sk);
out:
fl6_sock_release(flowlabel);
return err;
@@ -568,8 +568,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
}
/* HbH is allowed only once */
- if (np->rxopt.bits.hopopts && opt->hop) {
- u8 *ptr = nh + opt->hop;
+ if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+ u8 *ptr = nh + sizeof(struct ipv6hdr);
put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
}
@@ -630,8 +630,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
int hlim = ipv6_hdr(skb)->hop_limit;
put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
}
- if (np->rxopt.bits.ohopopts && opt->hop) {
- u8 *ptr = nh + opt->hop;
+ if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+ u8 *ptr = nh + sizeof(struct ipv6hdr);
put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
}
if (np->rxopt.bits.odstopts && opt->dst0) {
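
The connect paths in tcp_ipv4.c above and in datagram.c here switch from the per-family inet_set_txhash()/ip6_set_txhash() to one sk_set_txhash(). The helper is not part of this diff; presumably it just stores a fresh non-zero flow hash on the socket, which is family-independent and hence shareable — a hedged stand-in:

#include <stdint.h>
#include <stdlib.h>

struct sock_stub {
	uint32_t sk_txhash;	/* spreads the flow across tx queues/paths */
};

/* Assumed shape: draw a random hash, avoiding 0 so "no hash set"
 * stays distinguishable. Nothing here depends on IPv4 vs IPv6, which
 * is what lets one helper replace both variants. */
static void sk_set_txhash(struct sock_stub *sk)
{
	do {
		sk->sk_txhash = (uint32_t)random();
	} while (sk->sk_txhash == 0);
}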
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index a7bbbe45570b..ce203b0402be 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -632,7 +632,7 @@ int ipv6_parse_hopopts(struct sk_buff *skb)
return -1;
}
- opt->hop = sizeof(struct ipv6hdr);
+ opt->flags |= IP6SKB_HOPBYHOP;
if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
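
The exthdrs change above is why datagram.c could drop the stored offset: RFC 2460 requires the hop-by-hop header, when present, to immediately follow the fixed IPv6 header, so a single flag bit is enough and the offset is always sizeof(struct ipv6hdr). A sketch of the recovered pointer (the flag's numeric value here is a placeholder, not the kernel's):

#include <stddef.h>
#include <stdint.h>

#define IP6SKB_HOPBYHOP	0x10	/* placeholder bit, not the real value */

struct inet6_skb_parm_stub {
	uint16_t flags;
};

/* With the flag set, the HbH options start 40 bytes (the fixed IPv6
 * header) past the network header -- no per-packet offset needed. */
static const uint8_t *hopopts(const struct inet6_skb_parm_stub *opt,
			      const uint8_t *network_header)
{
	return (opt->flags & IP6SKB_HOPBYHOP) ? network_header + 40 : NULL;
}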
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 713d7434c911..6c2b2132c8d3 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -329,7 +329,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
struct flowi6 fl2;
int err;
- err = ip6_dst_lookup(sk, &dst, fl6);
+ err = ip6_dst_lookup(net, sk, &dst, fl6);
if (err)
return ERR_PTR(err);
@@ -361,7 +361,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
if (err)
goto relookup_failed;
- err = ip6_dst_lookup(sk, &dst2, &fl2);
+ err = ip6_dst_lookup(net, sk, &dst2, &fl2);
if (err)
goto relookup_failed;
@@ -591,7 +591,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- err = ip6_dst_lookup(sk, &dst, &fl6);
+ err = ip6_dst_lookup(net, sk, &dst, &fl6);
if (err)
goto out;
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b4fd96de97e6..6ac8dad0138a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -207,7 +207,6 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
struct sock *sk2;
const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw = NULL;
- int twrefcnt = 0;
spin_lock(lock);
@@ -234,21 +233,17 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
WARN_ON(!sk_unhashed(sk));
__sk_nulls_add_node_rcu(sk, &head->chain);
if (tw) {
- twrefcnt = inet_twsk_unhash(tw);
+ sk_nulls_del_node_init_rcu((struct sock *)tw);
NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
}
spin_unlock(lock);
- if (twrefcnt)
- inet_twsk_put(tw);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
if (twp) {
*twp = tw;
} else if (tw) {
/* Silly. Should hash-dance instead... */
- inet_twsk_deschedule(tw);
-
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
}
return 0;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d19861ab20..5693b5eb8482 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -32,6 +32,7 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
+#include <net/lwtunnel.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
@@ -177,6 +178,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
static void rt6_release(struct rt6_info *rt)
{
if (atomic_dec_and_test(&rt->rt6i_ref)) {
+ lwtstate_put(rt->rt6i_lwtstate);
rt6_free_pcpu(rt);
dst_free(&rt->dst);
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index a38d3ac0f18f..34f121812a14 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -728,7 +728,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
*/
ipv6h = ipv6_hdr(skb);
ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
- ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+ ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
ipv6h->hop_limit = tunnel->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
@@ -1182,7 +1182,8 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
ip6_flow_hdr(ipv6h, 0,
ip6_make_flowlabel(dev_net(dev), skb,
- t->fl.u.ip6.flowlabel, false));
+ t->fl.u.ip6.flowlabel, true,
+ &t->fl.u.ip6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 57990c929cd8..adba03ac7ce9 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -45,6 +45,7 @@
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/inet_ecn.h>
+#include <net/dst_metadata.h>
int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
{
@@ -55,7 +56,7 @@ int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
if (ipprot && ipprot->early_demux)
ipprot->early_demux(skb);
}
- if (!skb_dst(skb))
+ if (!skb_valid_dst(skb))
ip6_route_input(skb);
return dst_input(skb);
@@ -98,7 +99,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
* arrived via the sending interface (ethX), because of the
* nature of scoping architecture. --yoshfuji
*/
- IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+ IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
goto err;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d5f7716662db..26ea47930740 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -207,7 +207,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hlimit = ip6_dst_hoplimit(dst);
ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
- np->autoflowlabel));
+ np->autoflowlabel, fl6));
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
@@ -881,10 +881,9 @@ out:
return dst;
}
-static int ip6_dst_lookup_tail(struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
struct dst_entry **dst, struct flowi6 *fl6)
{
- struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
struct neighbour *n;
struct rt6_info *rt;
@@ -994,10 +993,11 @@ out_err_release:
*
* It returns zero on success, or a standard errno code on error.
*/
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6)
{
*dst = NULL;
- return ip6_dst_lookup_tail(sk, dst, fl6);
+ return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
@@ -1018,11 +1018,13 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
struct dst_entry *dst = NULL;
int err;
- err = ip6_dst_lookup_tail(sk, &dst, fl6);
+ err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
if (err)
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
+ if (!fl6->flowi6_oif)
+ fl6->flowi6_oif = dst->dev->ifindex;
return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
@@ -1050,7 +1052,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
dst = ip6_sk_dst_check(sk, dst, fl6);
- err = ip6_dst_lookup_tail(sk, &dst, fl6);
+ err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
if (err)
return ERR_PTR(err);
if (final_dst)
@@ -1647,7 +1649,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
ip6_flow_hdr(hdr, v6_cork->tclass,
ip6_make_flowlabel(net, skb, fl6->flowlabel,
- np->autoflowlabel));
+ np->autoflowlabel, fl6));
hdr->hop_limit = v6_cork->hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
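
The reason ip6_dst_lookup() grows an explicit struct net argument shows up later in this patch: the MPLS code calls ipv6_stub->ipv6_dst_lookup(net, NULL, ...) with no socket at all, so the namespace can no longer be recovered via sock_net(sk). A caller-side sketch, kernel types left opaque:

struct net;
struct sock;
struct dst_entry;
struct flowi6;

int ip6_dst_lookup(struct net *net, struct sock *sk,
		   struct dst_entry **dst, struct flowi6 *fl6);

/* sk may now legitimately be NULL: the namespace comes in directly
 * instead of being derived from the socket. */
static int lookup_route_no_socket(struct net *net, struct flowi6 *fl6)
{
	struct dst_entry *dst;

	return ip6_dst_lookup(net, NULL, &dst, fl6);
}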
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2e67b660118b..b0ab420612bc 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1095,7 +1095,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
- ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+ ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c53331cfed95..b3054611f88a 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1225,18 +1225,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (rt)
rt6_set_expires(rt, jiffies + (HZ * lifetime));
- if (ra_msg->icmph.icmp6_hop_limit) {
- /* Only set hop_limit on the interface if it is higher than
- * the current hop_limit.
- */
- if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+ if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+ ra_msg->icmph.icmp6_hop_limit) {
+ if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ if (rt)
+ dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+ ra_msg->icmph.icmp6_hop_limit);
} else {
- ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
}
- if (rt)
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
- ra_msg->icmph.icmp6_hop_limit);
}
skip_defrtr:
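
Summarizing the new RA hop-limit policy above: the advertised value is applied only when it is non-zero and at least accept_ra_min_hop_limit, and setting the sysctl to 256 or higher disables RA hop-limit processing outright, since no 8-bit value can qualify. Note the old "only raise" check is gone, so with the default minimum of 1 an RA can now lower hop_limit as well. A self-contained restatement of the acceptance test:

#include <stdbool.h>
#include <stdio.h>

/* min_hop_limit is the new per-interface sysctl; ra_hop_limit is the
 * 8-bit value carried in the Router Advertisement (0 = unspecified). */
static bool ra_hop_limit_accepted(int min_hop_limit, unsigned int ra_hop_limit)
{
	if (min_hop_limit >= 256)	/* disables RA hop-limit handling */
		return false;
	if (ra_hop_limit == 0)		/* RA did not specify one */
		return false;
	return ra_hop_limit >= (unsigned int)min_hop_limit;
}

int main(void)
{
	printf("%d\n", ra_hop_limit_accepted(1, 64));	/* 1: default accepts */
	printf("%d\n", ra_hop_limit_accepted(65, 64));	/* 0: below minimum, warn */
	printf("%d\n", ra_hop_limit_accepted(256, 64));	/* 0: feature disabled */
	return 0;
}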
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 3c35ced39b42..4e21f80228be 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -305,7 +305,7 @@ static void trace_packet(const struct sk_buff *skb,
}
#endif
-static inline __pure struct ip6t_entry *
+static inline struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
return (void *)entry + entry->next_offset;
@@ -324,12 +324,13 @@ ip6t_do_table(struct sk_buff *skb,
const char *indev, *outdev;
const void *table_base;
struct ip6t_entry *e, **jumpstack;
- unsigned int *stackptr, origptr, cpu;
+ unsigned int stackidx, cpu;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
/* Initialization */
+ stackidx = 0;
indev = state->in ? state->in->name : nulldevname;
outdev = state->out ? state->out->name : nulldevname;
/* We handle fragments by dealing with the first fragment as
@@ -357,8 +358,16 @@ ip6t_do_table(struct sk_buff *skb,
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
- stackptr = per_cpu_ptr(private->stackptr, cpu);
- origptr = *stackptr;
+
+ /* Switch to alternate jumpstack if we're being invoked via TEE.
+ * TEE issues XT_CONTINUE verdict on original skb so we must not
+ * clobber the jumpstack.
+ *
+ * For recursion via REJECT or SYNPROXY the stack will be clobbered
+ * but it is no problem since absolute verdict is issued by these.
+ */
+ if (static_key_false(&xt_tee_enabled))
+ jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
e = get_entry(table_base, private->hook_entry[hook]);
@@ -406,20 +415,16 @@ ip6t_do_table(struct sk_buff *skb,
verdict = (unsigned int)(-v) - 1;
break;
}
- if (*stackptr <= origptr)
+ if (stackidx == 0)
e = get_entry(table_base,
private->underflow[hook]);
else
- e = ip6t_next_entry(jumpstack[--*stackptr]);
+ e = ip6t_next_entry(jumpstack[--stackidx]);
continue;
}
if (table_base + v != ip6t_next_entry(e) &&
!(e->ipv6.flags & IP6T_F_GOTO)) {
- if (*stackptr >= private->stacksize) {
- verdict = NF_DROP;
- break;
- }
- jumpstack[(*stackptr)++] = e;
+ jumpstack[stackidx++] = e;
}
e = get_entry(table_base, v);
@@ -437,8 +442,6 @@ ip6t_do_table(struct sk_buff *skb,
break;
} while (!acpar.hotdrop);
- *stackptr = origptr;
-
xt_write_recseq_end(addend);
local_bh_enable();
@@ -452,11 +455,15 @@ ip6t_do_table(struct sk_buff *skb,
}
/* Figures out from what hook each rule can be called: returns 0 if
- there are loops. Puts hook bitmask in comefrom. */
+ * there are loops. Puts hook bitmask in comefrom.
+ *
+ * Keeps track of largest call depth seen and stores it in newinfo->stacksize.
+ */
static int
-mark_source_chains(const struct xt_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
+ unsigned int calldepth, max_calldepth = 0;
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
@@ -470,6 +477,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
/* Set initial back pointer. */
e->counters.pcnt = pos;
+ calldepth = 0;
for (;;) {
const struct xt_standard_target *t
@@ -531,6 +539,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
(entry0 + pos + size);
e->counters.pcnt = pos;
pos += size;
+ if (calldepth > 0)
+ --calldepth;
} else {
int newpos = t->verdict;
@@ -544,6 +554,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
newpos);
return 0;
}
+ if (entry0 + newpos != ip6t_next_entry(e) &&
+ !(e->ipv6.flags & IP6T_F_GOTO) &&
+ ++calldepth > max_calldepth)
+ max_calldepth = calldepth;
+
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
@@ -560,6 +575,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
next:
duprintf("Finished chain %u\n", hook);
}
+ newinfo->stacksize = max_calldepth;
return 1;
}
@@ -839,9 +855,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
return ret;
++i;
- if (strcmp(ip6t_get_target(iter)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
}
if (i != repl->num_entries) {
@@ -1754,9 +1767,6 @@ translate_compat_table(struct net *net,
if (ret != 0)
break;
++i;
- if (strcmp(ip6t_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
}
if (ret) {
/*
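
Two related ip6_tables changes above deserve a gloss: the per-cpu stack pointer becomes a plain local (stackidx), and a TEE-duplicated packet is steered into the second half of the per-cpu jumpstack so it cannot clobber the frames of the traversal it interrupted; correspondingly, stacksize is now the real maximum jump depth computed by mark_source_chains() rather than a count of ERROR targets. The offset arithmetic in isolation (the 2x-sized allocation is an assumption about code outside these hunks):

struct ip6t_entry;	/* opaque rule entry */

/* jumpstack is assumed to be allocated with 2 * stacksize slots per
 * cpu; 'duplicated' is 1 while TEE re-injects a cloned skb. The clone
 * then saves/restores its jumps in the upper half, leaving the
 * interrupted original traversal's lower half intact. */
static struct ip6t_entry **tee_safe_jumpstack(struct ip6t_entry **jumpstack,
					      unsigned int stacksize,
					      unsigned int duplicated)
{
	return jumpstack + stacksize * duplicated;
}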
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 12331efd49cf..567367a75172 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -35,14 +35,12 @@ MODULE_AUTHOR("Yasuyuki KOZAKAI <yasuyuki.kozakai@toshiba.co.jp>");
MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv6");
MODULE_LICENSE("GPL");
-
static unsigned int
reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_reject_info *reject = par->targinfo;
struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
- pr_debug("%s: medium point\n", __func__);
switch (reject->with) {
case IP6T_ICMP6_NO_ROUTE:
nf_send_unreach6(net, skb, ICMPV6_NOROUTE, par->hooknum);
@@ -65,9 +63,6 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
case IP6T_TCP_RESET:
nf_send_reset6(net, skb, par->hooknum);
break;
- default:
- net_info_ratelimited("case %u not handled yet\n", reject->with);
- break;
}
return NF_DROP;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index a45db0b4785c..267fb8d5876e 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -39,12 +39,9 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
if (skb->nfct)
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif
-
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ if (nf_bridge_in_prerouting(skb))
return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
-#endif
+
if (hooknum == NF_INET_PRE_ROUTING)
return IP6_DEFRAG_CONNTRACK_IN + zone;
else
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ca4700cb26c4..fdbada1569a3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -295,7 +295,8 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
* unspecified and mapped address have a v4 equivalent.
*/
v4addr = LOOPBACK4_IPV6;
- if (!(addr_type & IPV6_ADDR_MULTICAST)) {
+ if (!(addr_type & IPV6_ADDR_MULTICAST) &&
+ !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9de4d2bcd916..c0fa61eba8f2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -58,6 +58,7 @@
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
+#include <net/lwtunnel.h>
#include <asm/uaccess.h>
@@ -544,6 +545,7 @@ static void rt6_probe_deferred(struct work_struct *w)
static void rt6_probe(struct rt6_info *rt)
{
+ struct __rt6_probe_work *work;
struct neighbour *neigh;
/*
* Okay, this does not seem to be appropriate
@@ -558,34 +560,33 @@ static void rt6_probe(struct rt6_info *rt)
rcu_read_lock_bh();
neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
if (neigh) {
- write_lock(&neigh->lock);
if (neigh->nud_state & NUD_VALID)
goto out;
- }
-
- if (!neigh ||
- time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
- struct __rt6_probe_work *work;
+ work = NULL;
+ write_lock(&neigh->lock);
+ if (!(neigh->nud_state & NUD_VALID) &&
+ time_after(jiffies,
+ neigh->updated +
+ rt->rt6i_idev->cnf.rtr_probe_interval)) {
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work)
+ __neigh_set_probe_once(neigh);
+ }
+ write_unlock(&neigh->lock);
+ } else {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ }
- if (neigh && work)
- __neigh_set_probe_once(neigh);
-
- if (neigh)
- write_unlock(&neigh->lock);
+ if (work) {
+ INIT_WORK(&work->work, rt6_probe_deferred);
+ work->target = rt->rt6i_gateway;
+ dev_hold(rt->dst.dev);
+ work->dev = rt->dst.dev;
+ schedule_work(&work->work);
+ }
- if (work) {
- INIT_WORK(&work->work, rt6_probe_deferred);
- work->target = rt->rt6i_gateway;
- dev_hold(rt->dst.dev);
- work->dev = rt->dst.dev;
- schedule_work(&work->work);
- }
- } else {
out:
- write_unlock(&neigh->lock);
- }
rcu_read_unlock_bh();
}
#else
@@ -1770,6 +1771,18 @@ int ip6_route_add(struct fib6_config *cfg)
rt->dst.output = ip6_output;
+ if (cfg->fc_encap) {
+ struct lwtunnel_state *lwtstate;
+
+ err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+ cfg->fc_encap, &lwtstate);
+ if (err)
+ goto out;
+ rt->rt6i_lwtstate = lwtstate_get(lwtstate);
+ if (lwtunnel_output_redirect(rt->rt6i_lwtstate))
+ rt->dst.output = lwtunnel_output6;
+ }
+
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
if (rt->rt6i_dst.plen == 128)
@@ -2149,6 +2162,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
#endif
rt->rt6i_prefsrc = ort->rt6i_prefsrc;
rt->rt6i_table = ort->rt6i_table;
+ rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
}
#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2597,6 +2611,8 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_PREF] = { .type = NLA_U8 },
+ [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
+ [RTA_ENCAP] = { .type = NLA_NESTED },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2691,6 +2707,12 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->fc_flags |= RTF_PREF(pref);
}
+ if (tb[RTA_ENCAP])
+ cfg->fc_encap = tb[RTA_ENCAP];
+
+ if (tb[RTA_ENCAP_TYPE])
+ cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
err = 0;
errout:
return err;
@@ -2723,6 +2745,10 @@ beginning:
r_cfg.fc_gateway = nla_get_in6_addr(nla);
r_cfg.fc_flags |= RTF_GATEWAY;
}
+ r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+ if (nla)
+ r_cfg.fc_encap_type = nla_get_u16(nla);
}
err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
if (err) {
@@ -2785,7 +2811,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
return ip6_route_add(&cfg);
}
-static inline size_t rt6_nlmsg_size(void)
+static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
{
return NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(16) /* RTA_SRC */
@@ -2799,7 +2825,8 @@ static inline size_t rt6_nlmsg_size(void)
+ RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
+ nla_total_size(sizeof(struct rta_cacheinfo))
+ nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
- + nla_total_size(1); /* RTA_PREF */
+ + nla_total_size(1) /* RTA_PREF */
+ + lwtunnel_get_encap_size(rt->rt6i_lwtstate);
}
static int rt6_fill_node(struct net *net,
@@ -2947,6 +2974,8 @@ static int rt6_fill_node(struct net *net,
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
goto nla_put_failure;
+ lwtunnel_fill_encap(skb, rt->rt6i_lwtstate);
+
nlmsg_end(skb, nlh);
return 0;
@@ -3073,7 +3102,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
err = -ENOBUFS;
seq = info->nlh ? info->nlh->nlmsg_seq : 0;
- skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
+ skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
if (!skb)
goto errout;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ac35a28599be..94428fd85b2f 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -742,7 +742,7 @@ static int ipip_rcv(struct sk_buff *skb)
goto drop;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
- return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
}
return 1;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 4e705add4f18..45243bbe5253 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -17,6 +17,9 @@
#include <net/inet_frag.h>
static int one = 1;
+static int auto_flowlabels_min;
+static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
+
static struct ctl_table ipv6_table_template[] = {
{
@@ -45,7 +48,9 @@ static struct ctl_table ipv6_table_template[] = {
.data = &init_net.ipv6.sysctl.auto_flowlabels,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &auto_flowlabels_min,
+ .extra2 = &auto_flowlabels_max
},
{
.procname = "fwmark_reflect",
@@ -75,6 +80,13 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "ip_nonlocal_bind",
+ .data = &init_net.ipv6.sysctl.ip_nonlocal_bind,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{ }
};
@@ -117,6 +129,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
+ ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7a6cea5e4274..97d9314ea361 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -276,7 +276,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (err)
goto late_failure;
- ip6_set_txhash(sk);
+ sk_set_txhash(sk);
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
@@ -1090,7 +1090,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
newsk->sk_bound_dev_if = ireq->ir_iif;
- ip6_set_txhash(newsk);
+ sk_set_txhash(newsk);
/* Now IPv6 options...
@@ -1481,8 +1481,7 @@ do_time_wait:
ntohs(th->dest), tcp_v6_iif(skb));
if (sk2) {
struct inet_timewait_sock *tw = inet_twsk(sk);
- inet_twsk_deschedule(tw);
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
sk = sk2;
tcp_v6_restore_cb(skb);
goto process;
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 317c4662e544..f7ba51e8b4ca 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -44,6 +44,49 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
ieee802154_if_remove(sdata);
}
+#ifdef CONFIG_PM
+static int ieee802154_suspend(struct wpan_phy *wpan_phy)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+ if (!local->open_count)
+ goto suspend;
+
+ ieee802154_stop_queue(&local->hw);
+ synchronize_net();
+
+ /* stop hardware - this must stop RX */
+ ieee802154_stop_device(local);
+
+suspend:
+ local->suspended = true;
+ return 0;
+}
+
+static int ieee802154_resume(struct wpan_phy *wpan_phy)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ int ret;
+
+ /* nothing to do if HW shouldn't run */
+ if (!local->open_count)
+ goto wake_up;
+
+ /* restart hardware */
+ ret = drv_start(local);
+ if (ret)
+ return ret;
+
+wake_up:
+ ieee802154_wake_queue(&local->hw);
+ local->suspended = false;
+ return 0;
+}
+#else
+#define ieee802154_suspend NULL
+#define ieee802154_resume NULL
+#endif
+
static int
ieee802154_add_iface(struct wpan_phy *phy, const char *name,
unsigned char name_assign_type,
@@ -145,13 +188,18 @@ static int
ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 pan_id)
{
+ int ret;
+
ASSERT_RTNL();
if (wpan_dev->pan_id == pan_id)
return 0;
- wpan_dev->pan_id = pan_id;
- return 0;
+ ret = mac802154_wpan_update_llsec(wpan_dev->netdev);
+ if (!ret)
+ wpan_dev->pan_id = pan_id;
+
+ return ret;
}
static int
@@ -227,6 +275,8 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct cfg802154_ops mac802154_config_ops = {
.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
+ .suspend = ieee802154_suspend,
+ .resume = ieee802154_resume,
.add_virtual_intf = ieee802154_add_iface,
.del_virtual_intf = ieee802154_del_iface,
.set_channel = ieee802154_set_channel,
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 34755d5751a4..56ccffa3f2bf 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -56,9 +56,13 @@ struct ieee802154_local {
struct hrtimer ifs_timer;
bool started;
+ bool suspended;
struct tasklet_struct tasklet;
struct sk_buff_head skb_queue;
+
+ struct sk_buff *tx_skb;
+ struct work_struct tx_work;
};
enum {
@@ -94,8 +98,6 @@ struct ieee802154_sub_if_data {
struct mac802154_llsec sec;
};
-#define MAC802154_CHAN_NONE 0xff /* No channel is assigned */
-
/* utility functions/constants */
extern const void *const mac802154_wpan_phy_privid; /* for wpan_phy privid */
@@ -125,6 +127,8 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
+void ieee802154_xmit_worker(struct work_struct *work);
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t
@@ -167,6 +171,8 @@ void mac802154_get_table(struct net_device *dev,
struct ieee802154_llsec_table **t);
void mac802154_unlock_table(struct net_device *dev);
+int mac802154_wpan_update_llsec(struct net_device *dev);
+
/* interface handling */
int ieee802154_iface_init(void);
void ieee802154_iface_exit(void);
@@ -176,5 +182,6 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
unsigned char name_assign_type, enum nl802154_iftype type,
__le64 extended_addr);
void ieee802154_remove_interfaces(struct ieee802154_local *local);
+void ieee802154_stop_device(struct ieee802154_local *local);
#endif /* __IEEE802154_I_H */
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 8b698246a51b..416de903e467 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -30,7 +30,7 @@
#include "ieee802154_i.h"
#include "driver-ops.h"
-static int mac802154_wpan_update_llsec(struct net_device *dev)
+int mac802154_wpan_update_llsec(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
@@ -314,11 +314,8 @@ static int mac802154_slave_close(struct net_device *dev)
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
- if (!local->open_count) {
- flush_workqueue(local->workqueue);
- hrtimer_cancel(&local->ifs_timer);
- drv_stop(local);
- }
+ if (!local->open_count)
+ ieee802154_stop_device(local);
return 0;
}
@@ -471,6 +468,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
enum nl802154_iftype type)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ int ret;
u8 tmp;
/* set some type-dependent values */
@@ -505,6 +503,10 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
mutex_init(&sdata->sec_mtx);
mac802154_llsec_init(&sdata->sec);
+ ret = mac802154_wpan_update_llsec(sdata->dev);
+ if (ret < 0)
+ return ret;
+
break;
case NL802154_IFTYPE_MONITOR:
sdata->dev->destructor = free_netdev;
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 356b346e1ee8..9e55431b9a5c 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -40,7 +40,7 @@ static void ieee802154_tasklet_handler(unsigned long data)
* netstack.
*/
skb->pkt_type = 0;
- ieee802154_rx(&local->hw, skb);
+ ieee802154_rx(local, skb);
break;
default:
WARN(1, "mac802154: Packet is of unknown type %d\n",
@@ -58,11 +58,9 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
struct ieee802154_local *local;
size_t priv_size;
- if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
- !ops->start || !ops->stop || !ops->set_channel) {
- pr_err("undefined IEEE802.15.4 device operations\n");
+ if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
+ !ops->start || !ops->stop || !ops->set_channel))
return NULL;
- }
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wpan_phy priv data for both our ieee802154_local and for
@@ -107,6 +105,8 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
skb_queue_head_init(&local->skb_queue);
+ INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
+
/* init supported flags with 802.15.4 default ranges */
phy->supported.max_minbe = 8;
phy->supported.min_maxbe = 3;
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index d93ad2d4a4fc..d1c33c1d6b9b 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -246,13 +246,15 @@ ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb)
}
}
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
{
- struct ieee802154_local *local = hw_to_local(hw);
u16 crc;
WARN_ON_ONCE(softirq_count() == 0);
+ if (local->suspended)
+ goto drop;
+
/* TODO: When a transceiver omits the checksum here, we
* add an own calculated one. This is currently an ugly
* solution because the monitor needs a crc here.
@@ -273,8 +275,7 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
crc = crc_ccitt(0, skb->data, skb->len);
if (crc) {
rcu_read_unlock();
- kfree_skb(skb);
- return;
+ goto drop;
}
}
/* remove crc */
@@ -283,8 +284,11 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
__ieee802154_rx_handle_packet(local, skb);
rcu_read_unlock();
+
+ return;
+drop:
+ kfree_skb(skb);
}
-EXPORT_SYMBOL(ieee802154_rx);
void
ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index c62e95695c78..7ed439172f30 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -30,23 +30,11 @@
#include "ieee802154_i.h"
#include "driver-ops.h"
-/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
- * packets through the workqueue.
- */
-struct ieee802154_xmit_cb {
- struct sk_buff *skb;
- struct work_struct work;
- struct ieee802154_local *local;
-};
-
-static struct ieee802154_xmit_cb ieee802154_xmit_cb;
-
-static void ieee802154_xmit_worker(struct work_struct *work)
+void ieee802154_xmit_worker(struct work_struct *work)
{
- struct ieee802154_xmit_cb *cb =
- container_of(work, struct ieee802154_xmit_cb, work);
- struct ieee802154_local *local = cb->local;
- struct sk_buff *skb = cb->skb;
+ struct ieee802154_local *local =
+ container_of(work, struct ieee802154_local, tx_work);
+ struct sk_buff *skb = local->tx_skb;
struct net_device *dev = skb->dev;
int res;
@@ -106,11 +94,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
} else {
- INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker);
- ieee802154_xmit_cb.skb = skb;
- ieee802154_xmit_cb.local = local;
-
- queue_work(local->workqueue, &ieee802154_xmit_cb.work);
+ local->tx_skb = skb;
+ queue_work(local->workqueue, &local->tx_work);
}
return NETDEV_TX_OK;
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 583435f38930..f9fd0957ab67 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -14,6 +14,7 @@
*/
#include "ieee802154_i.h"
+#include "driver-ops.h"
/* privid for wpan_phys to determine whether they belong to us or not */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
@@ -92,3 +93,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
dev_consume_skb_any(skb);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
+
+void ieee802154_stop_device(struct ieee802154_local *local)
+{
+ flush_workqueue(local->workqueue);
+ hrtimer_cancel(&local->ifs_timer);
+ drv_stop(local);
+}
diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig
index 17bde799c854..5c467ef97311 100644
--- a/net/mpls/Kconfig
+++ b/net/mpls/Kconfig
@@ -24,7 +24,13 @@ config NET_MPLS_GSO
config MPLS_ROUTING
tristate "MPLS: routing support"
- help
+ ---help---
Add support for forwarding of mpls packets.
+config MPLS_IPTUNNEL
+ tristate "MPLS: IP over MPLS tunnel support"
+ depends on LWTUNNEL && MPLS_ROUTING
+ ---help---
+ Support for tunneling IP packets over MPLS.
+
endif # MPLS
diff --git a/net/mpls/Makefile b/net/mpls/Makefile
index 65bbe68c72e6..9ca923625016 100644
--- a/net/mpls/Makefile
+++ b/net/mpls/Makefile
@@ -3,5 +3,6 @@
#
obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+obj-$(CONFIG_MPLS_IPTUNNEL) += mpls_iptunnel.o
mpls_router-y := af_mpls.o
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1f93a5978f2a..8c5707db53c5 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -15,6 +15,10 @@
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
#include "internal.h"
#define LABEL_NOT_SPECIFIED (1<<20)
@@ -23,11 +27,23 @@
/* This maximum ha length copied from the definition of struct neighbour */
#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
+enum mpls_payload_type {
+ MPT_UNSPEC, /* IPv4 or IPv6 */
+ MPT_IPV4 = 4,
+ MPT_IPV6 = 6,
+
+ /* Other types not implemented:
+ * - Pseudo-wire with or without control word (RFC4385)
+ * - GAL (RFC5586)
+ */
+};
+
struct mpls_route { /* next hop label forwarding entry */
struct net_device __rcu *rt_dev;
struct rcu_head rt_rcu;
u32 rt_label[MAX_NEW_LABELS];
u8 rt_protocol; /* routing protocol that set this entry */
+ u8 rt_payload_type;
u8 rt_labels;
u8 rt_via_alen;
u8 rt_via_table;
@@ -58,10 +74,11 @@ static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
return rcu_dereference_rtnl(dev->mpls_ptr);
}
-static bool mpls_output_possible(const struct net_device *dev)
+bool mpls_output_possible(const struct net_device *dev)
{
return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
+EXPORT_SYMBOL_GPL(mpls_output_possible);
static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
{
@@ -69,13 +86,14 @@ static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
return rt->rt_labels * sizeof(struct mpls_shim_hdr);
}
-static unsigned int mpls_dev_mtu(const struct net_device *dev)
+unsigned int mpls_dev_mtu(const struct net_device *dev)
{
/* The amount of data the layer 2 frame can hold */
return dev->mtu;
}
+EXPORT_SYMBOL_GPL(mpls_dev_mtu);
-static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
if (skb->len <= mtu)
return false;
@@ -85,20 +103,13 @@ static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
return true;
}
+EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
struct mpls_entry_decoded dec)
{
- /* RFC4385 and RFC5586 encode other packets in mpls such that
- * they don't conflict with the ip version number, making
- * decoding by examining the ip version correct in everything
- * except for the strangest cases.
- *
- * The strange cases if we choose to support them will require
- * manual configuration.
- */
- struct iphdr *hdr4;
- bool success = true;
+ enum mpls_payload_type payload_type;
+ bool success = false;
/* The IPv4 code below accesses through the IPv4 header
* checksum, which is 12 bytes into the packet.
@@ -113,23 +124,32 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
if (!pskb_may_pull(skb, 12))
return false;
- /* Use ip_hdr to find the ip protocol version */
- hdr4 = ip_hdr(skb);
- if (hdr4->version == 4) {
+ payload_type = rt->rt_payload_type;
+ if (payload_type == MPT_UNSPEC)
+ payload_type = ip_hdr(skb)->version;
+
+ switch (payload_type) {
+ case MPT_IPV4: {
+ struct iphdr *hdr4 = ip_hdr(skb);
skb->protocol = htons(ETH_P_IP);
csum_replace2(&hdr4->check,
htons(hdr4->ttl << 8),
htons(dec.ttl << 8));
hdr4->ttl = dec.ttl;
+ success = true;
+ break;
}
- else if (hdr4->version == 6) {
+ case MPT_IPV6: {
struct ipv6hdr *hdr6 = ipv6_hdr(skb);
skb->protocol = htons(ETH_P_IPV6);
hdr6->hop_limit = dec.ttl;
+ success = true;
+ break;
+ }
+ case MPT_UNSPEC:
+ break;
}
- else
- /* version 0 and version 1 are used by pseudo wires */
- success = false;
+
return success;
}
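
mpls_egress() above now trusts the route's configured payload type and only falls back to sniffing the IP version nibble for MPT_UNSPEC routes; conveniently, MPT_IPV4 and MPT_IPV6 are defined as 4 and 6, so the fallback is a direct assignment. The resolution step in isolation:

#include <stdint.h>

enum mpls_payload_type { MPT_UNSPEC = 0, MPT_IPV4 = 4, MPT_IPV6 = 6 };

/* Returns the effective payload type at egress: an explicitly
 * configured type wins; MPT_UNSPEC routes fall back to the version
 * nibble of the first payload byte, which is 4 or 6 for IP and
 * something else (rejected later) for pseudo-wire/GAL payloads. */
static enum mpls_payload_type
mpls_effective_payload_type(enum mpls_payload_type configured,
			    uint8_t first_payload_byte)
{
	if (configured != MPT_UNSPEC)
		return configured;
	return (enum mpls_payload_type)(first_payload_byte >> 4);
}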
@@ -248,16 +268,17 @@ static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
};
struct mpls_route_config {
- u32 rc_protocol;
- u32 rc_ifindex;
- u16 rc_via_table;
- u16 rc_via_alen;
- u8 rc_via[MAX_VIA_ALEN];
- u32 rc_label;
- u32 rc_output_labels;
- u32 rc_output_label[MAX_NEW_LABELS];
- u32 rc_nlflags;
- struct nl_info rc_nlinfo;
+ u32 rc_protocol;
+ u32 rc_ifindex;
+ u16 rc_via_table;
+ u16 rc_via_alen;
+ u8 rc_via[MAX_VIA_ALEN];
+ u32 rc_label;
+ u32 rc_output_labels;
+ u32 rc_output_label[MAX_NEW_LABELS];
+ u32 rc_nlflags;
+ enum mpls_payload_type rc_payload_type;
+ struct nl_info rc_nlinfo;
};
static struct mpls_route *mpls_rt_alloc(size_t alen)
@@ -286,7 +307,7 @@ static void mpls_notify_route(struct net *net, unsigned index,
struct mpls_route *rt = new ? new : old;
unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
/* Ignore reserved labels for now */
- if (rt && (index >= 16))
+ if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}
@@ -320,13 +341,96 @@ static unsigned find_free_label(struct net *net)
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
- for (index = 16; index < platform_labels; index++) {
+ for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
+ index++) {
if (!rtnl_dereference(platform_label[index]))
return index;
}
return LABEL_NOT_SPECIFIED;
}
+#if IS_ENABLED(CONFIG_INET)
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+ struct net_device *dev;
+ struct rtable *rt;
+ struct in_addr daddr;
+
+ memcpy(&daddr, addr, sizeof(struct in_addr));
+ rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
+ if (IS_ERR(rt))
+ return ERR_CAST(rt);
+
+ dev = rt->dst.dev;
+ dev_hold(dev);
+
+ ip_rt_put(rt);
+
+ return dev;
+}
+#else
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+ return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+ struct net_device *dev;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int err;
+
+ if (!ipv6_stub)
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+ err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+ if (err)
+ return ERR_PTR(err);
+
+ dev = dst->dev;
+ dev_hold(dev);
+ dst_release(dst);
+
+ return dev;
+}
+#else
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+ return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+static struct net_device *find_outdev(struct net *net,
+ struct mpls_route_config *cfg)
+{
+ struct net_device *dev = NULL;
+
+ if (!cfg->rc_ifindex) {
+ switch (cfg->rc_via_table) {
+ case NEIGH_ARP_TABLE:
+ dev = inet_fib_lookup_dev(net, cfg->rc_via);
+ break;
+ case NEIGH_ND_TABLE:
+ dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+ break;
+ case NEIGH_LINK_TABLE:
+ break;
+ }
+ } else {
+ dev = dev_get_by_index(net, cfg->rc_ifindex);
+ }
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return dev;
+}
+
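
find_outdev() above makes RTA_OIF optional: when no ifindex is supplied, the via address family selects an IPv4 or IPv6 FIB lookup to resolve the egress device, taking a device reference that mpls_route_add() drops on its error paths.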
static int mpls_route_add(struct mpls_route_config *cfg)
{
struct mpls_route __rcu **platform_label;
@@ -345,8 +449,8 @@ static int mpls_route_add(struct mpls_route_config *cfg)
index = find_free_label(net);
}
- /* The first 16 labels are reserved, and may not be set */
- if (index < 16)
+ /* Reserved labels may not be set */
+ if (index < MPLS_LABEL_FIRST_UNRESERVED)
goto errout;
/* The full 20 bit range may not be supported. */
@@ -357,10 +461,12 @@ static int mpls_route_add(struct mpls_route_config *cfg)
if (cfg->rc_output_labels > MAX_NEW_LABELS)
goto errout;
- err = -ENODEV;
- dev = dev_get_by_index(net, cfg->rc_ifindex);
- if (!dev)
+ dev = find_outdev(net, cfg);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ dev = NULL;
goto errout;
+ }
/* Ensure this is a supported device */
err = -EINVAL;
@@ -401,6 +507,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
rt->rt_label[i] = cfg->rc_output_label[i];
rt->rt_protocol = cfg->rc_protocol;
RCU_INIT_POINTER(rt->rt_dev, dev);
+ rt->rt_payload_type = cfg->rc_payload_type;
rt->rt_via_table = cfg->rc_via_table;
memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
@@ -423,8 +530,8 @@ static int mpls_route_del(struct mpls_route_config *cfg)
index = cfg->rc_label;
- /* The first 16 labels are reserved, and may not be removed */
- if (index < 16)
+ /* Reserved labels may not be removed */
+ if (index < MPLS_LABEL_FIRST_UNRESERVED)
goto errout;
/* The full 20 bit range may not be supported */
@@ -626,6 +733,7 @@ int nla_put_labels(struct sk_buff *skb, int attrtype,
return 0;
}
+EXPORT_SYMBOL_GPL(nla_put_labels);
int nla_get_labels(const struct nlattr *nla,
u32 max_labels, u32 *labels, u32 label[])
@@ -671,6 +779,7 @@ int nla_get_labels(const struct nlattr *nla,
*labels = nla_labels;
return 0;
}
+EXPORT_SYMBOL_GPL(nla_get_labels);
static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
struct mpls_route_config *cfg)
@@ -740,8 +849,8 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
&cfg->rc_label))
goto errout;
- /* The first 16 labels are reserved, and may not be set */
- if (cfg->rc_label < 16)
+ /* Reserved labels may not be set */
+ if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
goto errout;
break;
@@ -866,8 +975,8 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
ASSERT_RTNL();
index = cb->args[0];
- if (index < 16)
- index = 16;
+ if (index < MPLS_LABEL_FIRST_UNRESERVED)
+ index = MPLS_LABEL_FIRST_UNRESERVED;
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
@@ -953,6 +1062,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
goto nort0;
RCU_INIT_POINTER(rt0->rt_dev, lo);
rt0->rt_protocol = RTPROT_KERNEL;
+ rt0->rt_payload_type = MPT_IPV4;
rt0->rt_via_table = NEIGH_LINK_TABLE;
memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
}
@@ -963,6 +1073,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
goto nort2;
RCU_INIT_POINTER(rt2->rt_dev, lo);
rt2->rt_protocol = RTPROT_KERNEL;
+ rt2->rt_payload_type = MPT_IPV6;
rt2->rt_via_table = NEIGH_LINK_TABLE;
memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
}
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 8cabeb5a1cb9..2681a4ba6c37 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -50,7 +50,12 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
return result;
}
-int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
+ const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+ u32 label[]);
+bool mpls_output_possible(const struct net_device *dev);
+unsigned int mpls_dev_mtu(const struct net_device *dev);
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
#endif /* MPLS_INTERNAL_H */
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
new file mode 100644
index 000000000000..276f8c992218
--- /dev/null
+++ b/net/mpls/mpls_iptunnel.c
@@ -0,0 +1,233 @@
+/*
+ * mpls tunnels: An implementation of MPLS tunnels using the lightweight
+ * tunnel infrastructure
+ *
+ * Authors: Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/lwtunnel.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include <net/ip6_fib.h>
+#include <net/route.h>
+#include <net/mpls_iptunnel.h>
+#include <linux/mpls_iptunnel.h>
+#include "internal.h"
+
+static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
+ [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 },
+};
+
+static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
+{
+ /* The size of the layer 2.5 labels to be added for this route */
+ return en->labels * sizeof(struct mpls_shim_hdr);
+}
+
+int mpls_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+ struct mpls_shim_hdr *hdr;
+ struct net_device *out_dev;
+ unsigned int hh_len;
+ unsigned int new_header_size;
+ unsigned int mtu;
+ struct dst_entry *dst = skb_dst(skb);
+ struct rtable *rt = NULL;
+ struct rt6_info *rt6 = NULL;
+ struct lwtunnel_state *lwtstate = NULL;
+ int err = 0;
+ bool bos;
+ int i;
+ unsigned int ttl;
+
+ /* Obtain the ttl */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ttl = ip_hdr(skb)->ttl;
+ rt = (struct rtable *)dst;
+ lwtstate = rt->rt_lwtstate;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ ttl = ipv6_hdr(skb)->hop_limit;
+ rt6 = (struct rt6_info *)dst;
+ lwtstate = rt6->rt6i_lwtstate;
+ } else {
+ goto drop;
+ }
+
+ skb_orphan(skb);
+
+ /* Find the output device */
+ out_dev = dst->dev;
+ if (!mpls_output_possible(out_dev) ||
+ !lwtstate || skb_warn_if_lro(skb))
+ goto drop;
+
+ skb_forward_csum(skb);
+
+ tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+ /* Verify the destination can hold the packet */
+ new_header_size = mpls_encap_size(tun_encap_info);
+ mtu = mpls_dev_mtu(out_dev);
+ if (mpls_pkt_too_big(skb, mtu - new_header_size))
+ goto drop;
+
+ hh_len = LL_RESERVED_SPACE(out_dev);
+ if (!out_dev->header_ops)
+ hh_len = 0;
+
+ /* Ensure there is enough space for the headers in the skb */
+ if (skb_cow(skb, hh_len + new_header_size))
+ goto drop;
+
+ skb_push(skb, new_header_size);
+ skb_reset_network_header(skb);
+
+ skb->dev = out_dev;
+ skb->protocol = htons(ETH_P_MPLS_UC);
+
+ /* Push the new labels */
+ hdr = mpls_hdr(skb);
+ bos = true;
+ for (i = tun_encap_info->labels - 1; i >= 0; i--) {
+ hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
+ ttl, 0, bos);
+ bos = false;
+ }
+
+ if (rt)
+ err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
+ skb);
+ else if (rt6)
+ err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+ skb);
+ if (err)
+ net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+ __func__, err);
+
+ return 0;
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
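
The label-pushing loop above fills entries last-to-first so the bottom-of-stack bit is set only on the innermost entry. A standalone sketch of the 32-bit shim word that mpls_entry_encode() produces for each entry (per RFC 3032; illustrative, not the kernel helper itself):

	#include <stdint.h>
	#include <arpa/inet.h>

	/* label(20 bits) | TC(3) | S(1) | TTL(8), network byte order */
	static uint32_t mpls_shim_sketch(uint32_t label, uint8_t tc,
					 int bos, uint8_t ttl)
	{
		uint32_t shim;

		shim  = (label & 0xfffff) << 12;	/* 20-bit label    */
		shim |= (uint32_t)(tc & 0x7) << 9;	/* traffic class   */
		shim |= (uint32_t)(bos ? 1 : 0) << 8;	/* bottom of stack */
		shim |= ttl;				/* time to live    */
		return htonl(shim);
	}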
+
+static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+ struct lwtunnel_state **ts)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+ struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
+ struct lwtunnel_state *newts;
+ int tun_encap_info_len;
+ int ret;
+
+ ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
+ mpls_iptunnel_policy);
+ if (ret < 0)
+ return ret;
+
+ if (!tb[MPLS_IPTUNNEL_DST])
+ return -EINVAL;
+
+ tun_encap_info_len = sizeof(*tun_encap_info);
+
+ newts = lwtunnel_state_alloc(tun_encap_info_len);
+ if (!newts)
+ return -ENOMEM;
+
+ newts->len = tun_encap_info_len;
+ tun_encap_info = mpls_lwtunnel_encap(newts);
+ ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+ &tun_encap_info->labels, tun_encap_info->label);
+ if (ret)
+ goto errout;
+ newts->type = LWTUNNEL_ENCAP_MPLS;
+ newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+
+ *ts = newts;
+
+ return 0;
+
+errout:
+ kfree(newts);
+ *ts = NULL;
+
+ return ret;
+}
+
+static int mpls_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+
+ tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+ if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
+ tun_encap_info->label))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+
+ tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+ return nla_total_size(tun_encap_info->labels * 4);
+}
+
+static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a);
+ struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
+ int l;
+
+ if (a_hdr->labels != b_hdr->labels)
+ return 1;
+
+ for (l = 0; l < MAX_NEW_LABELS; l++)
+ if (a_hdr->label[l] != b_hdr->label[l])
+ return 1;
+ return 0;
+}
+
+static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+ .build_state = mpls_build_state,
+ .output = mpls_output,
+ .fill_encap = mpls_fill_encap_info,
+ .get_encap_size = mpls_encap_nlsize,
+ .cmp_encap = mpls_encap_cmp,
+};
+
+static int __init mpls_iptunnel_init(void)
+{
+ return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_init(mpls_iptunnel_init);
+
+static void __exit mpls_iptunnel_exit(void)
+{
+ lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_exit(mpls_iptunnel_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
+MODULE_LICENSE("GPL v2");
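
For context, once this module is loaded the encap state built by mpls_build_state() is normally created from userspace with a recent iproute2, along the lines of `ip route add 10.0.0.0/24 encap mpls 200 via inet 10.1.1.2 dev eth0` (prefix, label, nexthop and device are illustrative only); the label list travels in the MPLS_IPTUNNEL_DST attribute and is parsed above by nla_get_labels().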
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a0e54974e2c9..2a5a0704245c 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -34,6 +34,9 @@ EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);
+DEFINE_PER_CPU(bool, nf_skb_duplicated);
+EXPORT_SYMBOL_GPL(nf_skb_duplicated);
+
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
mutex_lock(&afinfo_mutex);
@@ -52,9 +55,6 @@ void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
-struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
-EXPORT_SYMBOL(nf_hooks);
-
#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
@@ -62,63 +62,166 @@ EXPORT_SYMBOL(nf_hooks_needed);
static DEFINE_MUTEX(nf_hook_mutex);
-int nf_register_hook(struct nf_hook_ops *reg)
+static struct list_head *nf_find_hook_list(struct net *net,
+ const struct nf_hook_ops *reg)
{
- struct list_head *nf_hook_list;
- struct nf_hook_ops *elem;
+ struct list_head *hook_list = NULL;
- mutex_lock(&nf_hook_mutex);
- switch (reg->pf) {
- case NFPROTO_NETDEV:
+ if (reg->pf != NFPROTO_NETDEV)
+ hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
+ else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
- if (reg->hooknum == NF_NETDEV_INGRESS) {
- BUG_ON(reg->dev == NULL);
- nf_hook_list = &reg->dev->nf_hooks_ingress;
- net_inc_ingress_queue();
- break;
- }
+ if (reg->dev && dev_net(reg->dev) == net)
+ hook_list = &reg->dev->nf_hooks_ingress;
#endif
- /* Fall through. */
- default:
- nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
- break;
+ }
+ return hook_list;
+}
+
+struct nf_hook_entry {
+ const struct nf_hook_ops *orig_ops;
+ struct nf_hook_ops ops;
+};
+
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+ struct list_head *hook_list;
+ struct nf_hook_entry *entry;
+ struct nf_hook_ops *elem;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->orig_ops = reg;
+ entry->ops = *reg;
+
+ hook_list = nf_find_hook_list(net, reg);
+ if (!hook_list) {
+ kfree(entry);
+ return -ENOENT;
}
- list_for_each_entry(elem, nf_hook_list, list) {
+ mutex_lock(&nf_hook_mutex);
+ list_for_each_entry(elem, hook_list, list) {
if (reg->priority < elem->priority)
break;
}
- list_add_rcu(&reg->list, elem->list.prev);
+ list_add_rcu(&entry->ops.list, elem->list.prev);
mutex_unlock(&nf_hook_mutex);
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+ net_inc_ingress_queue();
+#endif
#ifdef HAVE_JUMP_LABEL
static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
return 0;
}
-EXPORT_SYMBOL(nf_register_hook);
+EXPORT_SYMBOL(nf_register_net_hook);
-void nf_unregister_hook(struct nf_hook_ops *reg)
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
+ struct list_head *hook_list;
+ struct nf_hook_entry *entry;
+ struct nf_hook_ops *elem;
+
+ hook_list = nf_find_hook_list(net, reg);
+ if (!hook_list)
+ return;
+
mutex_lock(&nf_hook_mutex);
- list_del_rcu(&reg->list);
- mutex_unlock(&nf_hook_mutex);
- switch (reg->pf) {
- case NFPROTO_NETDEV:
-#ifdef CONFIG_NETFILTER_INGRESS
- if (reg->hooknum == NF_NETDEV_INGRESS) {
- net_dec_ingress_queue();
+ list_for_each_entry(elem, hook_list, list) {
+ entry = container_of(elem, struct nf_hook_entry, ops);
+ if (entry->orig_ops == reg) {
+ list_del_rcu(&entry->ops.list);
break;
}
- break;
-#endif
- default:
- break;
}
+ mutex_unlock(&nf_hook_mutex);
+ if (&elem->list == hook_list) {
+ WARN(1, "nf_unregister_net_hook: hook not found!\n");
+ return;
+ }
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+ net_dec_ingress_queue();
+#endif
#ifdef HAVE_JUMP_LABEL
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
synchronize_net();
- nf_queue_nf_hook_drop(reg);
+ nf_queue_nf_hook_drop(net, &entry->ops);
+ kfree(entry);
+}
+EXPORT_SYMBOL(nf_unregister_net_hook);
+
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < n; i++) {
+ err = nf_register_net_hook(net, &reg[i]);
+ if (err)
+ goto err;
+ }
+ return err;
+
+err:
+ if (i > 0)
+ nf_unregister_net_hooks(net, reg, i);
+ return err;
+}
+EXPORT_SYMBOL(nf_register_net_hooks);
+
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n)
+{
+ while (n-- > 0)
+ nf_unregister_net_hook(net, &reg[n]);
+}
+EXPORT_SYMBOL(nf_unregister_net_hooks);
+
+static LIST_HEAD(nf_hook_list);
+
+int nf_register_hook(struct nf_hook_ops *reg)
+{
+ struct net *net, *last;
+ int ret;
+
+ rtnl_lock();
+ for_each_net(net) {
+ ret = nf_register_net_hook(net, reg);
+ if (ret && ret != -ENOENT)
+ goto rollback;
+ }
+ list_add_tail(&reg->list, &nf_hook_list);
+ rtnl_unlock();
+
+ return 0;
+rollback:
+ last = net;
+ for_each_net(net) {
+ if (net == last)
+ break;
+ nf_unregister_net_hook(net, reg);
+ }
+ rtnl_unlock();
+ return ret;
+}
+EXPORT_SYMBOL(nf_register_hook);
+
+void nf_unregister_hook(struct nf_hook_ops *reg)
+{
+ struct net *net;
+
+ rtnl_lock();
+ list_del(&reg->list);
+ for_each_net(net)
+ nf_unregister_net_hook(net, reg);
+ rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);
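
A minimal sketch of how a hook user would adopt the per-namespace API introduced here (hypothetical demo module, not part of this patch; hook prototype as used in this kernel series):

	#include <linux/kernel.h>
	#include <linux/netfilter.h>
	#include <linux/netfilter_ipv4.h>
	#include <linux/skbuff.h>
	#include <net/net_namespace.h>

	static unsigned int demo_hook(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
	{
		return NF_ACCEPT;	/* pass every packet through */
	}

	static struct nf_hook_ops demo_ops[] = {
		{
			.hook     = demo_hook,
			.pf       = NFPROTO_IPV4,
			.hooknum  = NF_INET_LOCAL_IN,
			.priority = NF_IP_PRI_FILTER,
		},
	};

	/* Register from pernet init so every namespace gets its own entry;
	 * nf_register_net_hooks() unwinds partial registration on failure.
	 */
	static int __net_init demo_net_init(struct net *net)
	{
		return nf_register_net_hooks(net, demo_ops,
					     ARRAY_SIZE(demo_ops));
	}

	static void __net_exit demo_net_exit(struct net *net)
	{
		nf_unregister_net_hooks(net, demo_ops, ARRAY_SIZE(demo_ops));
	}

	static struct pernet_operations demo_net_ops = {
		.init = demo_net_init,
		.exit = demo_net_exit,
	};

Wiring this up via register_pernet_subsys(&demo_net_ops) replaces a single global nf_register_hook() call; legacy callers keep working because nf_register_hook() above now fans out over for_each_net() and the nf_hook_list bookkeeping.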
@@ -295,8 +398,46 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
+static int nf_register_hook_list(struct net *net)
+{
+ struct nf_hook_ops *elem;
+ int ret;
+
+ rtnl_lock();
+ list_for_each_entry(elem, &nf_hook_list, list) {
+ ret = nf_register_net_hook(net, elem);
+ if (ret && ret != -ENOENT)
+ goto out_undo;
+ }
+ rtnl_unlock();
+ return 0;
+
+out_undo:
+ list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
+ nf_unregister_net_hook(net, elem);
+ rtnl_unlock();
+ return ret;
+}
+
+static void nf_unregister_hook_list(struct net *net)
+{
+ struct nf_hook_ops *elem;
+
+ rtnl_lock();
+ list_for_each_entry(elem, &nf_hook_list, list)
+ nf_unregister_net_hook(net, elem);
+ rtnl_unlock();
+}
+
static int __net_init netfilter_net_init(struct net *net)
{
+ int i, h, ret;
+
+ for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
+ for (h = 0; h < NF_MAX_HOOKS; h++)
+ INIT_LIST_HEAD(&net->nf.hooks[i][h]);
+ }
+
#ifdef CONFIG_PROC_FS
net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
net->proc_net);
@@ -307,11 +448,16 @@ static int __net_init netfilter_net_init(struct net *net)
return -ENOMEM;
}
#endif
- return 0;
+ ret = nf_register_hook_list(net);
+ if (ret)
+ remove_proc_entry("netfilter", net->proc_net);
+
+ return ret;
}
static void __net_exit netfilter_net_exit(struct net *net)
{
+ nf_unregister_hook_list(net);
remove_proc_entry("netfilter", net->proc_net);
}
@@ -322,12 +468,7 @@ static struct pernet_operations netfilter_net_ops = {
int __init netfilter_init(void)
{
- int i, h, ret;
-
- for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
- for (h = 0; h < NF_MAX_HOOKS; h++)
- INIT_LIST_HEAD(&nf_hooks[i][h]);
- }
+ int ret;
ret = register_pernet_subsys(&netfilter_net_ops);
if (ret < 0)
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 7e8141647943..a2ff7d746ebf 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -137,7 +137,7 @@ struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name)
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
{
- if (scheduler && scheduler->module)
+ if (scheduler)
module_put(scheduler->module);
}
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index b45da90fad32..67197731eb68 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -42,6 +42,8 @@ static const char *const sctp_conntrack_names[] = {
"SHUTDOWN_SENT",
"SHUTDOWN_RECD",
"SHUTDOWN_ACK_SENT",
+ "HEARTBEAT_SENT",
+ "HEARTBEAT_ACKED",
};
#define SECS * HZ
@@ -57,6 +59,8 @@ static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
[SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
[SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
#define sNO SCTP_CONNTRACK_NONE
@@ -67,6 +71,8 @@ static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
#define sSS SCTP_CONNTRACK_SHUTDOWN_SENT
#define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+#define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
#define sIV SCTP_CONNTRACK_MAX
/*
@@ -88,6 +94,10 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
to that of the SHUTDOWN chunk.
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
the SHUTDOWN chunk. Connection is closed.
+HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
+HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to
+ that of the HEARTBEAT chunk. Secondary connection is
+ established.
*/
/* TODO
@@ -97,36 +107,40 @@ CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
- Check the error type in the reply dir before transitioning from
cookie echoed to closed.
- Sec 5.2.4 of RFC 2960
- - Multi Homing support.
+ - Full Multi Homing support.
*/
/* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
-/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
-/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
+/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
},
{
/* REPLY */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
-/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
-/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
}
};
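
For orientation, the table is indexed as sctp_conntracks[direction][chunk_row][current_state]. For example, sctp_conntracks[ORIGINAL][9][sNO] -- a HEARTBEAT opening a previously unseen flow -- yields sHS, and the HEARTBEAT-ACK row (10) in the REPLY direction then moves sHS to sHA, the "secondary connection established" state that gets the long 210-second timeout above.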
@@ -278,9 +292,16 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
i = 8;
break;
+ case SCTP_CID_HEARTBEAT:
+ pr_debug("SCTP_CID_HEARTBEAT");
+ i = 9;
+ break;
+ case SCTP_CID_HEARTBEAT_ACK:
+ pr_debug("SCTP_CID_HEARTBEAT_ACK");
+ i = 10;
+ break;
default:
- /* Other chunks like DATA, SACK, HEARTBEAT and
- its ACK do not cause a change in state */
+ /* Other chunks like DATA or SACK do not change the state */
pr_debug("Unknown chunk type, Will stay in %s\n",
sctp_conntrack_names[cur_state]);
return cur_state;
@@ -329,6 +350,8 @@ static int sctp_packet(struct nf_conn *ct,
!test_bit(SCTP_CID_COOKIE_ECHO, map) &&
!test_bit(SCTP_CID_ABORT, map) &&
!test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+ !test_bit(SCTP_CID_HEARTBEAT, map) &&
+ !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
sh->vtag != ct->proto.sctp.vtag[dir]) {
pr_debug("Verification tag check failed\n");
goto out;
@@ -357,6 +380,16 @@ static int sctp_packet(struct nf_conn *ct,
/* Sec 8.5.1 (D) */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
+ } else if (sch->type == SCTP_CID_HEARTBEAT ||
+ sch->type == SCTP_CID_HEARTBEAT_ACK) {
+ if (ct->proto.sctp.vtag[dir] == 0) {
+ pr_debug("Setting vtag %x for dir %d\n",
+ sh->vtag, dir);
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+ pr_debug("Verification tag check failed\n");
+ goto out_unlock;
+ }
}
old_state = ct->proto.sctp.state;
@@ -466,6 +499,10 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
/* Sec 8.5.1 (A) */
return false;
}
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ pr_debug("Setting vtag %x for secondary conntrack\n",
+ sh->vtag);
+ ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
}
/* If it is a shutdown ack OOTB packet, we expect a return
shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
@@ -610,6 +647,8 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
[CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
@@ -658,6 +697,18 @@ static struct ctl_table sctp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+ {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{ }
};
@@ -730,6 +781,8 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+ pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT];
+ pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED];
#endif
return 0;
}
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 399210693c2a..065522564ac6 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -19,7 +19,7 @@ unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
/* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
struct nf_hook_state *state, unsigned int queuenum);
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops);
int __init netfilter_queue_init(void);
/* nf_log.c */
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 8a8b2abc35ff..96777f9a9350 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -105,21 +105,15 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
{
const struct nf_queue_handler *qh;
- struct net *net;
- rtnl_lock();
rcu_read_lock();
qh = rcu_dereference(queue_handler);
- if (qh) {
- for_each_net(net) {
- qh->nf_hook_drop(net, ops);
- }
- }
+ if (qh)
+ qh->nf_hook_drop(net, ops);
rcu_read_unlock();
- rtnl_unlock();
}
/*
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index cfe636808541..4a41eb92bcc0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -130,20 +130,24 @@ static void nft_trans_destroy(struct nft_trans *trans)
int nft_register_basechain(struct nft_base_chain *basechain,
unsigned int hook_nops)
{
+ struct net *net = read_pnet(&basechain->pnet);
+
if (basechain->flags & NFT_BASECHAIN_DISABLED)
return 0;
- return nf_register_hooks(basechain->ops, hook_nops);
+ return nf_register_net_hooks(net, basechain->ops, hook_nops);
}
EXPORT_SYMBOL_GPL(nft_register_basechain);
void nft_unregister_basechain(struct nft_base_chain *basechain,
unsigned int hook_nops)
{
+ struct net *net = read_pnet(&basechain->pnet);
+
if (basechain->flags & NFT_BASECHAIN_DISABLED)
return;
- nf_unregister_hooks(basechain->ops, hook_nops);
+ nf_unregister_net_hooks(net, basechain->ops, hook_nops);
}
EXPORT_SYMBOL_GPL(nft_unregister_basechain);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index f77bad46ac68..05d0b03530f6 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -114,7 +114,6 @@ unsigned int
nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
{
const struct nft_chain *chain = ops->priv, *basechain = chain;
- const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
@@ -125,10 +124,6 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
int rulenum;
unsigned int gencursor = nft_genmask_cur(net);
- /* Ignore chains that are not for the current network namespace */
- if (!net_eq(net, chain_net))
- return NF_ACCEPT;
-
do_chain:
rulenum = 0;
rule = list_entry(&chain->rules, struct nft_rule, list);
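
With basechain hooks now registered and unregistered through the per-namespace lists (previous hunk), a chain can only be invoked from the namespace it belongs to, so the runtime net_eq() filter that nft_do_chain() applied on every packet becomes dead code and is removed here.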
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 52561e1c31e2..cb2f13ebb5a6 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -166,11 +166,13 @@ void nft_meta_get_eval(const struct nft_expr *expr,
goto err;
*dest = out->group;
break;
+#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
if (skb->sk == NULL || !sk_fullsock(skb->sk))
goto err;
*dest = skb->sk->sk_classid;
break;
+#endif
default:
WARN_ON(1);
goto err;
@@ -246,7 +248,9 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
case NFT_META_CPU:
case NFT_META_IIFGROUP:
case NFT_META_OIFGROUP:
+#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
+#endif
len = sizeof(u32);
break;
case NFT_META_IIFNAME:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index d324fe71260c..9b42b5ea6dcd 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -67,9 +67,6 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_IPV6] = "ip6",
};
-/* Allow this many total (re)entries. */
-static const unsigned int xt_jumpstack_multiplier = 2;
-
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
@@ -688,8 +685,6 @@ void xt_free_table_info(struct xt_table_info *info)
kvfree(info->jumpstack);
}
- free_percpu(info->stackptr);
-
kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
@@ -732,15 +727,14 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
+struct static_key xt_tee_enabled __read_mostly;
+EXPORT_SYMBOL_GPL(xt_tee_enabled);
+
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
unsigned int size;
int cpu;
- i->stackptr = alloc_percpu(unsigned int);
- if (i->stackptr == NULL)
- return -ENOMEM;
-
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
i->jumpstack = vzalloc(size);
@@ -749,8 +743,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
if (i->jumpstack == NULL)
return -ENOMEM;
- i->stacksize *= xt_jumpstack_multiplier;
- size = sizeof(void *) * i->stacksize;
+ /* ruleset without jumps -- no stack needed */
+ if (i->stacksize == 0)
+ return 0;
+
+ /* The jumpstack needs to be able to record two full callchains: one
+ * from the first rule set traversal, plus one for table reentrancy
+ * via -j TEE, without clobbering the callchain that brought us to
+ * the TEE target.
+ *
+ * This is done by allocating two jumpstacks per cpu, on reentry
+ * the upper half of the stack is used.
+ *
+ * see the jumpstack setup in ipt_do_table() for more details.
+ */
+ size = sizeof(void *) * i->stacksize * 2u;
for_each_possible_cpu(cpu) {
if (size > PAGE_SIZE)
i->jumpstack[cpu] = vmalloc_node(size,
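
The doubling above only helps if the consumer picks the right half on reentry. A minimal self-contained sketch of that selection (in the kernel this logic lives in ipt_do_table(), gated by static_key_false(&xt_tee_enabled) and indexed by __this_cpu_read(nf_skb_duplicated), both introduced in this series):

	#include <stddef.h>

	/* Lower half of the per-cpu stack for the first traversal,
	 * upper half when re-entered via the TEE-duplicated skb.
	 */
	static void **tee_select_jumpstack(void **stack_base,
					   unsigned int stacksize,
					   int skb_duplicated)
	{
		return stack_base +
		       (size_t)stacksize * (skb_duplicated ? 1 : 0);
	}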
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index a747eb475b68..c5d6556dbc5e 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -37,7 +37,6 @@ struct xt_tee_priv {
};
static const union nf_inet_addr tee_zero_address;
-static DEFINE_PER_CPU(bool, tee_active);
static struct net *pick_net(struct sk_buff *skb)
{
@@ -88,7 +87,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
const struct xt_tee_tginfo *info = par->targinfo;
struct iphdr *iph;
- if (__this_cpu_read(tee_active))
+ if (__this_cpu_read(nf_skb_duplicated))
return XT_CONTINUE;
/*
* Copy the skb, and route the copy. Will later return %XT_CONTINUE for
@@ -125,9 +124,9 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
ip_send_check(iph);
if (tee_tg_route4(skb, info)) {
- __this_cpu_write(tee_active, true);
+ __this_cpu_write(nf_skb_duplicated, true);
ip_local_out(skb);
- __this_cpu_write(tee_active, false);
+ __this_cpu_write(nf_skb_duplicated, false);
} else {
kfree_skb(skb);
}
@@ -170,7 +169,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
- if (__this_cpu_read(tee_active))
+ if (__this_cpu_read(nf_skb_duplicated))
return XT_CONTINUE;
skb = pskb_copy(skb, GFP_ATOMIC);
if (skb == NULL)
@@ -188,9 +187,9 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
--iph->hop_limit;
}
if (tee_tg_route6(skb, info)) {
- __this_cpu_write(tee_active, true);
+ __this_cpu_write(nf_skb_duplicated, true);
ip6_local_out(skb);
- __this_cpu_write(tee_active, false);
+ __this_cpu_write(nf_skb_duplicated, false);
} else {
kfree_skb(skb);
}
@@ -252,6 +251,7 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
} else
info->priv = NULL;
+ static_key_slow_inc(&xt_tee_enabled);
return 0;
}
@@ -263,6 +263,7 @@ static void tee_tg_destroy(const struct xt_tgdtor_param *par)
unregister_netdevice_notifier(&info->priv->notifier);
kfree(info->priv);
}
+ static_key_slow_dec(&xt_tee_enabled);
}
static struct xt_target tee_tg_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index cca96cec1b68..d0c96c5ae29a 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -272,8 +272,7 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
hp->source, lport ? lport : hp->dest,
skb->dev, NFT_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk));
- inet_twsk_put(inet_twsk(sk));
+ inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
}
}
@@ -437,8 +436,7 @@ tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
tgi->lport ? tgi->lport : hp->dest,
skb->dev, NFT_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk));
- inet_twsk_put(inet_twsk(sk));
+ inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
}
}
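
Both TPROXY call sites get the same mechanical conversion: inet_twsk_deschedule_put() folds the former inet_twsk_deschedule() + inet_twsk_put() pair into one helper, stopping the timewait timer and dropping the reference in a single call.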
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 15840401a2ce..422dc0567de9 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -34,7 +34,7 @@ config OPENVSWITCH
config OPENVSWITCH_GRE
tristate "Open vSwitch GRE tunneling support"
depends on OPENVSWITCH
- depends on NET_IPGRE_DEMUX
+ depends on NET_IPGRE
default OPENVSWITCH
---help---
If you say Y here, then Open vSwitch will be able to create GRE
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 91b9478413ef..6e1701de04d8 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,6 +15,6 @@ openvswitch-y := \
vport-internal_dev.o \
vport-netdev.o
+obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
-obj-$(CONFIG_OPENVSWITCH_VXLAN) += vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GRE) += vport-gre.o
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ee34f474ad14..14da52ddd327 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -619,7 +619,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, const struct nlattr *attr,
const struct nlattr *actions, int actions_len)
{
- struct ovs_tunnel_info info;
+ struct ip_tunnel_info info;
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
@@ -677,9 +677,12 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
a = nla_next(a, &rem)) {
+ u32 probability;
+
switch (nla_type(a)) {
case OVS_SAMPLE_ATTR_PROBABILITY:
- if (prandom_u32() >= nla_get_u32(a))
+ probability = nla_get_u32(a);
+ if (!probability || prandom_u32() > probability)
return 0;
break;
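
The rewritten probability check pins down both boundary cases: 0 now explicitly samples nothing, and UINT32_MAX samples every packet (the old >= comparison could still skip one whenever prandom_u32() returned UINT32_MAX). An illustrative, hypothetical userspace helper for producing the attribute value:

	#include <stdint.h>

	/* Scale a sampling fraction in [0.0, 1.0] onto the u32 range
	 * that OVS_SAMPLE_ATTR_PROBABILITY carries.
	 */
	static uint32_t sample_prob_u32(double fraction)
	{
		return (uint32_t)(fraction * (double)UINT32_MAX);
	}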
@@ -741,7 +744,15 @@ static int execute_set_action(struct sk_buff *skb,
{
/* Only tunnel set execution is supported without a mask. */
if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
- OVS_CB(skb)->egress_tun_info = nla_data(a);
+ struct ovs_tunnel_info *tun = nla_data(a);
+
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)tun->tun_dst);
+ skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
+
+ /* FIXME: Remove when all vports have been converted */
+ OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;
+
return 0;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ff8c4a4c1609..ffe984f5b95c 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -176,7 +176,7 @@ static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
- return vport->ops->get_name(vport);
+ return ovs_vport_name(vport);
}
static int get_dpifindex(const struct datapath *dp)
@@ -188,7 +188,7 @@ static int get_dpifindex(const struct datapath *dp)
local = ovs_vport_rcu(dp, OVSP_LOCAL);
if (local)
- ifindex = netdev_vport_priv(local)->dev->ifindex;
+ ifindex = local->dev->ifindex;
else
ifindex = 0;
@@ -1018,7 +1018,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
ovs_unlock();
- ovs_nla_free_flow_actions(old_acts);
+ ovs_nla_free_flow_actions_rcu(old_acts);
ovs_flow_free(new_flow, false);
}
@@ -1030,7 +1030,7 @@ err_unlock_ovs:
ovs_unlock();
kfree_skb(reply);
err_kfree_acts:
- kfree(acts);
+ ovs_nla_free_flow_actions(acts);
err_kfree_flow:
ovs_flow_free(new_flow, false);
error:
@@ -1157,7 +1157,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (reply)
ovs_notify(&dp_flow_genl_family, reply, info);
if (old_acts)
- ovs_nla_free_flow_actions(old_acts);
+ ovs_nla_free_flow_actions_rcu(old_acts);
return 0;
@@ -1165,7 +1165,7 @@ err_unlock_ovs:
ovs_unlock();
kfree_skb(reply);
err_kfree_acts:
- kfree(acts);
+ ovs_nla_free_flow_actions(acts);
error:
return error;
}
@@ -1800,7 +1800,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
nla_put_string(skb, OVS_VPORT_ATTR_NAME,
- vport->ops->get_name(vport)))
+ ovs_vport_name(vport)))
goto nla_put_failure;
ovs_vport_get_stats(vport, &vport_stats);
@@ -2219,13 +2219,10 @@ static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
struct vport *vport;
hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
- struct netdev_vport *netdev_vport;
-
if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
continue;
- netdev_vport = netdev_vport_priv(vport);
- if (dev_net(netdev_vport->dev) == dnet)
+ if (dev_net(vport->dev) == dnet)
list_add(&vport->detach_list, head);
}
}
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index cd691e935e08..6b28c5cedb23 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -25,6 +25,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
+#include <net/ip_tunnels.h>
#include "flow.h"
#include "flow_table.h"
@@ -98,7 +99,7 @@ struct datapath {
* when a packet is received by OVS.
*/
struct ovs_skb_cb {
- struct ovs_tunnel_info *egress_tun_info;
+ struct ip_tunnel_info *egress_tun_info;
struct vport *input_vport;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -114,7 +115,7 @@ struct ovs_skb_cb {
* @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
*/
struct dp_upcall_info {
- const struct ovs_tunnel_info *egress_tun_info;
+ const struct ip_tunnel_info *egress_tun_info;
const struct nlattr *userdata;
const struct nlattr *actions;
int actions_len;
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 2c631fe76be1..a7a80a6b77b0 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,13 +58,10 @@ void ovs_dp_notify_wq(struct work_struct *work)
struct hlist_node *n;
hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
- struct netdev_vport *netdev_vport;
-
if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
continue;
- netdev_vport = netdev_vport_priv(vport);
- if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
+ if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
dp_detach_port_notify(vport);
}
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index bc7b0aba994a..8db22ef73626 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -682,12 +682,12 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
return key_extract(skb, key);
}
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
struct sk_buff *skb, struct sw_flow_key *key)
{
/* Extract metadata from packet. */
if (tun_info) {
- memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
+ memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
if (tun_info->options) {
BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index a076e445ccc2..b62cdb3e3589 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -32,31 +32,11 @@
#include <linux/time.h>
#include <linux/flex_array.h>
#include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_metadata.h>
struct sk_buff;
-/* Used to memset ovs_key_ipv4_tunnel padding. */
-#define OVS_TUNNEL_KEY_SIZE \
- (offsetof(struct ovs_key_ipv4_tunnel, tp_dst) + \
- FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))
-
-struct ovs_key_ipv4_tunnel {
- __be64 tun_id;
- __be32 ipv4_src;
- __be32 ipv4_dst;
- __be16 tun_flags;
- u8 ipv4_tos;
- u8 ipv4_ttl;
- __be16 tp_src;
- __be16 tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
-
-struct ovs_tunnel_info {
- struct ovs_key_ipv4_tunnel tunnel;
- const void *options;
- u8 options_len;
-};
-
/* Store options at the end of the array if they are less than the
* maximum size. This allows us to get the benefits of variable length
* matching for small options.
@@ -66,54 +46,9 @@ struct ovs_tunnel_info {
#define TUN_METADATA_OPTS(flow_key, opt_len) \
((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
-static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
- __be32 saddr, __be32 daddr,
- u8 tos, u8 ttl,
- __be16 tp_src,
- __be16 tp_dst,
- __be64 tun_id,
- __be16 tun_flags,
- const void *opts,
- u8 opts_len)
-{
- tun_info->tunnel.tun_id = tun_id;
- tun_info->tunnel.ipv4_src = saddr;
- tun_info->tunnel.ipv4_dst = daddr;
- tun_info->tunnel.ipv4_tos = tos;
- tun_info->tunnel.ipv4_ttl = ttl;
- tun_info->tunnel.tun_flags = tun_flags;
-
- /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
- * the upper tunnel are used.
- * E.g: GRE over IPSEC, the tp_src and tp_port are zero.
- */
- tun_info->tunnel.tp_src = tp_src;
- tun_info->tunnel.tp_dst = tp_dst;
-
- /* Clear struct padding. */
- if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE)
- memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE,
- 0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
-
- tun_info->options = opts;
- tun_info->options_len = opts_len;
-}
-
-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
- const struct iphdr *iph,
- __be16 tp_src,
- __be16 tp_dst,
- __be64 tun_id,
- __be16 tun_flags,
- const void *opts,
- u8 opts_len)
-{
- __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
- iph->tos, iph->ttl,
- tp_src, tp_dst,
- tun_id, tun_flags,
- opts, opts_len);
-}
+struct ovs_tunnel_info {
+ struct metadata_dst *tun_dst;
+};
#define OVS_SW_FLOW_KEY_METADATA_SIZE \
(offsetof(struct sw_flow_key, recirc_id) + \
@@ -122,7 +57,7 @@ static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
struct sw_flow_key {
u8 tun_opts[255];
u8 tun_opts_len;
- struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */
+ struct ip_tunnel_key tun_key; /* Encapsulating tunnel key. */
struct {
u32 priority; /* Packet QoS priority. */
u32 skb_mark; /* SKB mark. */
@@ -273,7 +208,7 @@ void ovs_flow_stats_clear(struct sw_flow *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
struct sk_buff *skb,
struct sw_flow_key *key);
/* Extract key from packet coming from userspace. */
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624e41c4267f..a6eb77ab1a64 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -47,9 +47,9 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>
+#include <net/vxlan.h>
#include "flow_netlink.h"
-#include "vport-vxlan.h"
struct ovs_len_tbl {
int len;
@@ -475,7 +475,7 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
{
struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
unsigned long opt_key_offset;
- struct ovs_vxlan_opts opts;
+ struct vxlan_metadata opts;
int err;
BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
@@ -626,7 +626,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
static int vxlan_opt_to_nlattr(struct sk_buff *skb,
const void *tun_opts, int swkey_tun_opts_len)
{
- const struct ovs_vxlan_opts *opts = tun_opts;
+ const struct vxlan_metadata *opts = tun_opts;
struct nlattr *nla;
nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
@@ -641,7 +641,7 @@ static int vxlan_opt_to_nlattr(struct sk_buff *skb,
}
static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *output,
+ const struct ip_tunnel_key *output,
const void *tun_opts, int swkey_tun_opts_len)
{
if (output->tun_flags & TUNNEL_KEY &&
@@ -689,7 +689,7 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
}
static int ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *output,
+ const struct ip_tunnel_key *output,
const void *tun_opts, int swkey_tun_opts_len)
{
struct nlattr *nla;
@@ -708,9 +708,9 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb,
}
int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
- const struct ovs_tunnel_info *egress_tun_info)
+ const struct ip_tunnel_info *egress_tun_info)
{
- return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
+ return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
egress_tun_info->options,
egress_tun_info->options_len);
}
@@ -1548,11 +1548,48 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
return sfa;
}
+static void ovs_nla_free_set_action(const struct nlattr *a)
+{
+ const struct nlattr *ovs_key = nla_data(a);
+ struct ovs_tunnel_info *ovs_tun;
+
+ switch (nla_type(ovs_key)) {
+ case OVS_KEY_ATTR_TUNNEL_INFO:
+ ovs_tun = nla_data(ovs_key);
+ dst_release((struct dst_entry *)ovs_tun->tun_dst);
+ break;
+ }
+}
+
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+ const struct nlattr *a;
+ int rem;
+
+ if (!sf_acts)
+ return;
+
+ nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+ switch (nla_type(a)) {
+ case OVS_ACTION_ATTR_SET:
+ ovs_nla_free_set_action(a);
+ break;
+ }
+ }
+
+ kfree(sf_acts);
+}
+
+static void __ovs_nla_free_flow_actions(struct rcu_head *head)
+{
+ ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
+}
+
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
{
- kfree_rcu(sf_acts, rcu);
+ call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
}
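
The switch from kfree_rcu() to call_rcu() is forced by the new cleanup duties: kfree_rcu() can only free the object itself, whereas actions carrying OVS_KEY_ATTR_TUNNEL_INFO now hold a metadata_dst reference that must be released via dst_release() before the memory is returned.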
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
@@ -1746,7 +1783,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
{
struct sw_flow_match match;
struct sw_flow_key key;
- struct ovs_tunnel_info *tun_info;
+ struct metadata_dst *tun_dst;
+ struct ip_tunnel_info *tun_info;
+ struct ovs_tunnel_info *ovs_tun;
struct nlattr *a;
int err = 0, start, opts_type;
@@ -1771,13 +1810,23 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
if (start < 0)
return start;
+ tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
+ if (!tun_dst)
+ return -ENOMEM;
+
a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
- sizeof(*tun_info) + key.tun_opts_len, log);
- if (IS_ERR(a))
+ sizeof(*ovs_tun), log);
+ if (IS_ERR(a)) {
+ dst_release((struct dst_entry *)tun_dst);
return PTR_ERR(a);
+ }
+
+ ovs_tun = nla_data(a);
+ ovs_tun->tun_dst = tun_dst;
- tun_info = nla_data(a);
- tun_info->tunnel = key.tun_key;
+ tun_info = &tun_dst->u.tun_info;
+ tun_info->mode = IP_TUNNEL_INFO_TX;
+ tun_info->key = key.tun_key;
tun_info->options_len = key.tun_opts_len;
if (tun_info->options_len) {
@@ -2177,7 +2226,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
key->eth.tci, log);
if (err)
- kfree(*sfa);
+ ovs_nla_free_flow_actions(*sfa);
return err;
}
@@ -2227,13 +2276,14 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
switch (key_type) {
case OVS_KEY_ATTR_TUNNEL_INFO: {
- struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+ struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
+ struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
if (!start)
return -EMSGSIZE;
- err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
+ err = ipv4_tun_to_nlattr(skb, &tun_info->key,
tun_info->options_len ?
tun_info->options : NULL,
tun_info->options_len);
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 5c3d75bff310..acd074408f0a 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -55,7 +55,7 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
const struct nlattr *mask, bool log);
int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
- const struct ovs_tunnel_info *);
+ const struct ip_tunnel_info *);
bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
@@ -69,5 +69,6 @@ int ovs_nla_put_actions(const struct nlattr *attr,
int len, struct sk_buff *skb);
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
#endif /* flow_netlink.h */
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 65523948fb95..3a9d1dde76ed 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -18,6 +18,7 @@
#include "flow.h"
#include "datapath.h"
+#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -143,7 +144,8 @@ static void flow_free(struct sw_flow *flow)
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
- kfree((struct sw_flow_actions __force *)flow->sf_acts);
+ if (flow->sf_acts)
+ ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
for_each_node(node)
if (flow->stats[node])
kmem_cache_free(flow_stats_cache,
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 208c576bd1b6..1da3a14d1010 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -77,7 +77,7 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
struct vport *vport = gs->rcv_data;
struct genevehdr *geneveh = geneve_hdr(skb);
int opts_len;
- struct ovs_tunnel_info tun_info;
+ struct ip_tunnel_info tun_info;
__be64 key;
__be16 flags;
@@ -90,10 +90,9 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
key = vni_to_tunnel_id(geneveh->vni);
- ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
- udp_hdr(skb)->source, udp_hdr(skb)->dest,
- key, flags,
- geneveh->options, opts_len);
+ ip_tunnel_info_init(&tun_info, ip_hdr(skb),
+ udp_hdr(skb)->source, udp_hdr(skb)->dest,
+ key, flags, geneveh->options, opts_len);
ovs_vport_receive(vport, skb, &tun_info);
}
@@ -165,8 +164,8 @@ error:
static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
{
- const struct ovs_key_ipv4_tunnel *tun_key;
- struct ovs_tunnel_info *tun_info;
+ const struct ip_tunnel_key *tun_key;
+ struct ip_tunnel_info *tun_info;
struct net *net = ovs_dp_get_net(vport->dp);
struct geneve_port *geneve_port = geneve_vport(vport);
__be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
@@ -183,7 +182,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
goto error;
}
- tun_key = &tun_info->tunnel;
+ tun_key = &tun_info->key;
rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -225,7 +224,7 @@ static const char *geneve_get_name(const struct vport *vport)
}
static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *egress_tun_info)
+ struct ip_tunnel_info *egress_tun_info)
{
struct geneve_port *geneve_port = geneve_vport(vport);
struct net *net = ovs_dp_get_net(vport->dp);
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index f17ac9642f4e..871801d2ac23 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -45,239 +45,47 @@
#include "datapath.h"
#include "vport.h"
+#include "vport-netdev.h"
static struct vport_ops ovs_gre_vport_ops;
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 be64_get_low32(__be64 x)
+static struct vport *gre_tnl_create(const struct vport_parms *parms)
{
-#ifdef __BIG_ENDIAN
- return (__force __be32)x;
-#else
- return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
-static __be16 filter_tnl_flags(__be16 flags)
-{
- return flags & (TUNNEL_CSUM | TUNNEL_KEY);
-}
-
-static struct sk_buff *__build_header(struct sk_buff *skb,
- int tunnel_hlen)
-{
- struct tnl_ptk_info tpi;
- const struct ovs_key_ipv4_tunnel *tun_key;
-
- tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-
- skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
- if (IS_ERR(skb))
- return skb;
-
- tpi.flags = filter_tnl_flags(tun_key->tun_flags);
- tpi.proto = htons(ETH_P_TEB);
- tpi.key = be64_get_low32(tun_key->tun_id);
- tpi.seq = 0;
- gre_build_header(skb, &tpi, tunnel_hlen);
-
- return skb;
-}
-
-static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
-{
-#ifdef __BIG_ENDIAN
- return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
-#else
- return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
-#endif
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_rcv(struct sk_buff *skb,
- const struct tnl_ptk_info *tpi)
-{
- struct ovs_tunnel_info tun_info;
- struct ovs_net *ovs_net;
- struct vport *vport;
- __be64 key;
-
- ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
- vport = rcu_dereference(ovs_net->vport_net.gre_vport);
- if (unlikely(!vport))
- return PACKET_REJECT;
-
- key = key_to_tunnel_id(tpi->key, tpi->seq);
- ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
- filter_tnl_flags(tpi->flags), NULL, 0);
-
- ovs_vport_receive(vport, skb, &tun_info);
- return PACKET_RCVD;
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_err(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi)
-{
- struct ovs_net *ovs_net;
+ struct net *net = ovs_dp_get_net(parms->dp);
+ struct net_device *dev;
struct vport *vport;
- ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
- vport = rcu_dereference(ovs_net->vport_net.gre_vport);
-
- if (unlikely(!vport))
- return PACKET_REJECT;
- else
- return PACKET_RCVD;
-}
-
-static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
- struct net *net = ovs_dp_get_net(vport->dp);
- const struct ovs_key_ipv4_tunnel *tun_key;
- struct flowi4 fl;
- struct rtable *rt;
- int min_headroom;
- int tunnel_hlen;
- __be16 df;
- int err;
-
- if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
- err = -EINVAL;
- goto err_free_skb;
- }
-
- tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
- rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
- if (IS_ERR(rt)) {
- err = PTR_ERR(rt);
- goto err_free_skb;
- }
-
- tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
-
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + tunnel_hlen + sizeof(struct iphdr)
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
- if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
- int head_delta = SKB_DATA_ALIGN(min_headroom -
- skb_headroom(skb) +
- 16);
- err = pskb_expand_head(skb, max_t(int, head_delta, 0),
- 0, GFP_ATOMIC);
- if (unlikely(err))
- goto err_free_rt;
- }
-
- skb = vlan_hwaccel_push_inside(skb);
- if (unlikely(!skb)) {
- err = -ENOMEM;
- goto err_free_rt;
- }
-
- /* Push Tunnel header. */
- skb = __build_header(skb, tunnel_hlen);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- skb = NULL;
- goto err_free_rt;
+ vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
+
+ rtnl_lock();
+ dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
+ ovs_vport_free(vport);
+ return ERR_CAST(dev);
}
- df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
- htons(IP_DF) : 0;
-
- skb->ignore_df = 1;
-
- return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
- tun_key->ipv4_dst, IPPROTO_GRE,
- tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
-err_free_rt:
- ip_rt_put(rt);
-err_free_skb:
- kfree_skb(skb);
- return err;
-}
-
-static struct gre_cisco_protocol gre_protocol = {
- .handler = gre_rcv,
- .err_handler = gre_err,
- .priority = 1,
-};
-
-static int gre_ports;
-static int gre_init(void)
-{
- int err;
-
- gre_ports++;
- if (gre_ports > 1)
- return 0;
-
- err = gre_cisco_register(&gre_protocol);
- if (err)
- pr_warn("cannot register gre protocol handler\n");
-
- return err;
-}
-
-static void gre_exit(void)
-{
- gre_ports--;
- if (gre_ports > 0)
- return;
-
- gre_cisco_unregister(&gre_protocol);
-}
+ dev_change_flags(dev, dev->flags | IFF_UP);
+ rtnl_unlock();
-static const char *gre_get_name(const struct vport *vport)
-{
- return vport_priv(vport);
+ return vport;
}
static struct vport *gre_create(const struct vport_parms *parms)
{
- struct net *net = ovs_dp_get_net(parms->dp);
- struct ovs_net *ovs_net;
struct vport *vport;
- int err;
-
- err = gre_init();
- if (err)
- return ERR_PTR(err);
-
- ovs_net = net_generic(net, ovs_net_id);
- if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
- vport = ERR_PTR(-EEXIST);
- goto error;
- }
- vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
+ vport = gre_tnl_create(parms);
if (IS_ERR(vport))
- goto error;
-
- strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
- rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
- return vport;
-
-error:
- gre_exit();
- return vport;
-}
-
-static void gre_tnl_destroy(struct vport *vport)
-{
- struct net *net = ovs_dp_get_net(vport->dp);
- struct ovs_net *ovs_net;
-
- ovs_net = net_generic(net, ovs_net_id);
+ return vport;
- RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
- ovs_vport_deferred_free(vport);
- gre_exit();
+ return ovs_netdev_link(vport, parms->name);
}
static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *egress_tun_info)
+ struct ip_tunnel_info *egress_tun_info)
{
return ovs_tunnel_get_egress_info(egress_tun_info,
ovs_dp_get_net(vport->dp),
@@ -288,10 +96,9 @@ static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
- .destroy = gre_tnl_destroy,
- .get_name = gre_get_name,
- .send = gre_tnl_send,
+ .send = ovs_netdev_send,
.get_egress_tun_info = gre_get_egress_tun_info,
+ .destroy = ovs_netdev_tunnel_destroy,
.owner = THIS_MODULE,
};
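[Editor's note: the deleted be64_get_low32()/key_to_tunnel_id() helpers folded the 32-bit GRE key and sequence number into the 64-bit tunnel id, with the byte layout chosen per host endianness. A minimal userspace model of the little-endian branch of the removed code (names and types are illustrative, not the kernel's __be32/__be64):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl */

/* Little-endian variant: the GRE key lands in the high 32 bits of the
 * host value, the sequence number in the low 32 bits. */
static uint64_t key_to_tunnel_id(uint32_t key_be, uint32_t seq_be)
{
	return ((uint64_t)key_be << 32) | seq_be;
}

static uint32_t tunnel_id_low32(uint64_t id)
{
	return (uint32_t)(id >> 32);	/* matches be64_get_low32() on LE */
}

int main(void)
{
	uint32_t key = htonl(42), seq = htonl(7);
	uint64_t id = key_to_tunnel_id(key, seq);

	/* Round-trip: the GRE key must come back out unchanged. */
	printf("key in 42, key out %u\n", ntohl(tunnel_id_low32(id)));
	return 0;
}

With the vport now backed by a real gretap net_device, this packing is handled by the common GRE code instead.]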
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 6a55f7105505..c058bbf876c3 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -156,49 +156,44 @@ static void do_setup(struct net_device *netdev)
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
struct vport *vport;
- struct netdev_vport *netdev_vport;
struct internal_dev *internal_dev;
int err;
- vport = ovs_vport_alloc(sizeof(struct netdev_vport),
- &ovs_internal_vport_ops, parms);
+ vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
}
- netdev_vport = netdev_vport_priv(vport);
-
- netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
- parms->name, NET_NAME_UNKNOWN,
- do_setup);
- if (!netdev_vport->dev) {
+ vport->dev = alloc_netdev(sizeof(struct internal_dev),
+ parms->name, NET_NAME_UNKNOWN, do_setup);
+ if (!vport->dev) {
err = -ENOMEM;
goto error_free_vport;
}
- dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
- internal_dev = internal_dev_priv(netdev_vport->dev);
+ dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
+ internal_dev = internal_dev_priv(vport->dev);
internal_dev->vport = vport;
/* Restrict bridge port to current netns. */
if (vport->port_no == OVSP_LOCAL)
- netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+ vport->dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_lock();
- err = register_netdevice(netdev_vport->dev);
+ err = register_netdevice(vport->dev);
if (err)
goto error_free_netdev;
- dev_set_promiscuity(netdev_vport->dev, 1);
+ dev_set_promiscuity(vport->dev, 1);
rtnl_unlock();
- netif_start_queue(netdev_vport->dev);
+ netif_start_queue(vport->dev);
return vport;
error_free_netdev:
rtnl_unlock();
- free_netdev(netdev_vport->dev);
+ free_netdev(vport->dev);
error_free_vport:
ovs_vport_free(vport);
error:
@@ -207,21 +202,19 @@ error:
static void internal_dev_destroy(struct vport *vport)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
- netif_stop_queue(netdev_vport->dev);
+ netif_stop_queue(vport->dev);
rtnl_lock();
- dev_set_promiscuity(netdev_vport->dev, -1);
+ dev_set_promiscuity(vport->dev, -1);
/* unregister_netdevice() waits for an RCU grace period. */
- unregister_netdevice(netdev_vport->dev);
+ unregister_netdevice(vport->dev);
rtnl_unlock();
}
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
- struct net_device *netdev = netdev_vport_priv(vport)->dev;
+ struct net_device *netdev = vport->dev;
int len;
if (unlikely(!(netdev->flags & IFF_UP))) {
@@ -249,7 +242,6 @@ static struct vport_ops ovs_internal_vport_ops = {
.type = OVS_VPORT_TYPE_INTERNAL,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
- .get_name = ovs_netdev_get_name,
.send = internal_dev_recv,
};
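[Editor's note: internal_dev_create() above follows the kernel's goto-unwind idiom — every acquisition has a matching label, and a failure jumps to the label that releases exactly what was acquired so far, in reverse order. A standalone sketch of the same shape (the alloc/register functions are stand-ins, not kernel APIs):

#include <stdlib.h>
#include <stdio.h>

struct netdev { int registered; };
struct vport { struct netdev *dev; };

/* Stand-ins for ovs_vport_alloc() / alloc_netdev() / register_netdevice(). */
static struct vport *vport_alloc(void) { return calloc(1, sizeof(struct vport)); }
static struct netdev *netdev_alloc(void) { return calloc(1, sizeof(struct netdev)); }
static int netdev_register(struct netdev *d) { d->registered = 1; return 0; }

static struct vport *create(void)
{
	struct vport *vport = vport_alloc();
	struct netdev *dev;

	if (!vport)
		goto error;

	dev = netdev_alloc();
	if (!dev)
		goto error_free_vport;

	if (netdev_register(dev))
		goto error_free_netdev;

	vport->dev = dev;
	return vport;

error_free_netdev:	/* unwind in reverse order of acquisition */
	free(dev);
error_free_vport:
	free(vport);
error:
	return NULL;
}

int main(void)
{
	printf("%s\n", create() ? "created" : "failed");
	return 0;
}
]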
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 33e6d6e2908f..4b70aaa4a746 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -26,10 +26,13 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
+#include <linux/export.h>
-#include <net/llc.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
#include "datapath.h"
+#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
@@ -54,7 +57,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
skb_push(skb, ETH_HLEN);
ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
- ovs_vport_receive(vport, skb, NULL);
+ ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
return;
error:
@@ -83,105 +86,112 @@ static struct net_device *get_dpdev(const struct datapath *dp)
local = ovs_vport_ovsl(dp, OVSP_LOCAL);
BUG_ON(!local);
- return netdev_vport_priv(local)->dev;
+ return local->dev;
}
-static struct vport *netdev_create(const struct vport_parms *parms)
+struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
- struct vport *vport;
- struct netdev_vport *netdev_vport;
int err;
- vport = ovs_vport_alloc(sizeof(struct netdev_vport),
- &ovs_netdev_vport_ops, parms);
- if (IS_ERR(vport)) {
- err = PTR_ERR(vport);
- goto error;
- }
-
- netdev_vport = netdev_vport_priv(vport);
-
- netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
- if (!netdev_vport->dev) {
+ vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
+ if (!vport->dev) {
err = -ENODEV;
goto error_free_vport;
}
- if (netdev_vport->dev->flags & IFF_LOOPBACK ||
- netdev_vport->dev->type != ARPHRD_ETHER ||
- ovs_is_internal_dev(netdev_vport->dev)) {
+ if (vport->dev->flags & IFF_LOOPBACK ||
+ vport->dev->type != ARPHRD_ETHER ||
+ ovs_is_internal_dev(vport->dev)) {
err = -EINVAL;
goto error_put;
}
rtnl_lock();
- err = netdev_master_upper_dev_link(netdev_vport->dev,
+ err = netdev_master_upper_dev_link(vport->dev,
get_dpdev(vport->dp));
if (err)
goto error_unlock;
- err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+ err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
vport);
if (err)
goto error_master_upper_dev_unlink;
- dev_disable_lro(netdev_vport->dev);
- dev_set_promiscuity(netdev_vport->dev, 1);
- netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+ dev_disable_lro(vport->dev);
+ dev_set_promiscuity(vport->dev, 1);
+ vport->dev->priv_flags |= IFF_OVS_DATAPATH;
rtnl_unlock();
return vport;
error_master_upper_dev_unlink:
- netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+ netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
rtnl_unlock();
error_put:
- dev_put(netdev_vport->dev);
+ dev_put(vport->dev);
error_free_vport:
ovs_vport_free(vport);
-error:
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(ovs_netdev_link);
-static void free_port_rcu(struct rcu_head *rcu)
+static struct vport *netdev_create(const struct vport_parms *parms)
{
- struct netdev_vport *netdev_vport = container_of(rcu,
- struct netdev_vport, rcu);
+ struct vport *vport;
- dev_put(netdev_vport->dev);
- ovs_vport_free(vport_from_priv(netdev_vport));
+ vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
+
+ return ovs_netdev_link(vport, parms->name);
}
-void ovs_netdev_detach_dev(struct vport *vport)
+static void vport_netdev_free(struct rcu_head *rcu)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+ struct vport *vport = container_of(rcu, struct vport, rcu);
+ if (vport->dev)
+ dev_put(vport->dev);
+ ovs_vport_free(vport);
+}
+
+void ovs_netdev_detach_dev(struct vport *vport)
+{
ASSERT_RTNL();
- netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
- netdev_rx_handler_unregister(netdev_vport->dev);
- netdev_upper_dev_unlink(netdev_vport->dev,
- netdev_master_upper_dev_get(netdev_vport->dev));
- dev_set_promiscuity(netdev_vport->dev, -1);
+ vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+ netdev_rx_handler_unregister(vport->dev);
+ netdev_upper_dev_unlink(vport->dev,
+ netdev_master_upper_dev_get(vport->dev));
+ dev_set_promiscuity(vport->dev, -1);
}
+EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
static void netdev_destroy(struct vport *vport)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
rtnl_lock();
- if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+ if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
ovs_netdev_detach_dev(vport);
rtnl_unlock();
- call_rcu(&netdev_vport->rcu, free_port_rcu);
+ call_rcu(&vport->rcu, vport_netdev_free);
}
-const char *ovs_netdev_get_name(const struct vport *vport)
+void ovs_netdev_tunnel_destroy(struct vport *vport)
{
- const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- return netdev_vport->dev->name;
+ rtnl_lock();
+ if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+ ovs_netdev_detach_dev(vport);
+
+ /* Early release so we can unregister the device */
+ dev_put(vport->dev);
+ rtnl_delete_link(vport->dev);
+ vport->dev = NULL;
+ rtnl_unlock();
+
+ call_rcu(&vport->rcu, vport_netdev_free);
}
+EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
static unsigned int packet_length(const struct sk_buff *skb)
{
@@ -193,20 +203,19 @@ static unsigned int packet_length(const struct sk_buff *skb)
return length;
}
-static int netdev_send(struct vport *vport, struct sk_buff *skb)
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- int mtu = netdev_vport->dev->mtu;
+ int mtu = vport->dev->mtu;
int len;
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
- netdev_vport->dev->name,
+ vport->dev->name,
packet_length(skb), mtu);
goto drop;
}
- skb->dev = netdev_vport->dev;
+ skb->dev = vport->dev;
len = skb->len;
dev_queue_xmit(skb);
@@ -216,6 +225,7 @@ drop:
kfree_skb(skb);
return 0;
}
+EXPORT_SYMBOL_GPL(ovs_netdev_send);
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
@@ -231,8 +241,7 @@ static struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.create = netdev_create,
.destroy = netdev_destroy,
- .get_name = ovs_netdev_get_name,
- .send = netdev_send,
+ .send = ovs_netdev_send,
};
int __init ovs_netdev_init(void)
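[Editor's note: with struct netdev_vport gone, vport_netdev_free() recovers the enclosing struct vport from the embedded rcu_head via container_of() before freeing. The same pointer arithmetic in a standalone program, using the offsetof-based definition the kernel macro reduces to:

#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct vport {
	void *dev;
	struct rcu_head rcu;	/* embedded, as in struct vport above */
};

static void vport_free(struct rcu_head *rcu)
{
	struct vport *vport = container_of(rcu, struct vport, rcu);

	printf("freeing vport at %p\n", (void *)vport);
	free(vport);
}

int main(void)
{
	struct vport *v = calloc(1, sizeof(*v));

	/* call_rcu() would invoke this after a grace period; calling it
	 * directly here just demonstrates the pointer recovery. */
	vport_free(&v->rcu);
	return 0;
}
]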
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index 6f7038e79c52..497cc81f1aca 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -26,22 +26,12 @@
struct vport *ovs_netdev_get_vport(struct net_device *dev);
-struct netdev_vport {
- struct rcu_head rcu;
-
- struct net_device *dev;
-};
-
-static inline struct netdev_vport *
-netdev_vport_priv(const struct vport *vport)
-{
- return vport_priv(vport);
-}
-
-const char *ovs_netdev_get_name(const struct vport *);
+struct vport *ovs_netdev_link(struct vport *vport, const char *name);
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
void ovs_netdev_detach_dev(struct vport *);
int __init ovs_netdev_init(void);
void ovs_netdev_exit(void);
+void ovs_netdev_tunnel_destroy(struct vport *vport);
#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 6d39766e7828..1e8b00a23a23 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -17,94 +17,37 @@
* 02110-1301, USA
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/net.h>
-#include <linux/rculist.h>
-#include <linux/udp.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
#include <linux/module.h>
-
-#include <net/icmp.h>
-#include <net/ip.h>
#include <net/udp.h>
#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
#include <net/vxlan.h>
#include "datapath.h"
#include "vport.h"
-#include "vport-vxlan.h"
-
-/**
- * struct vxlan_port - Keeps track of open UDP ports
- * @vs: vxlan_sock created for the port.
- * @name: vport name.
- */
-struct vxlan_port {
- struct vxlan_sock *vs;
- char name[IFNAMSIZ];
- u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
-};
-
-static struct vport_ops ovs_vxlan_vport_ops;
+#include "vport-netdev.h"
-static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
-{
- return vport_priv(vport);
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
- struct vxlan_metadata *md)
-{
- struct ovs_tunnel_info tun_info;
- struct vxlan_port *vxlan_port;
- struct vport *vport = vs->data;
- struct iphdr *iph;
- struct ovs_vxlan_opts opts = {
- .gbp = md->gbp,
- };
- __be64 key;
- __be16 flags;
-
- flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
- vxlan_port = vxlan_vport(vport);
- if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
- flags |= TUNNEL_VXLAN_OPT;
-
- /* Save outer tunnel values */
- iph = ip_hdr(skb);
- key = cpu_to_be64(ntohl(md->vni) >> 8);
- ovs_flow_tun_info_init(&tun_info, iph,
- udp_hdr(skb)->source, udp_hdr(skb)->dest,
- key, flags, &opts, sizeof(opts));
-
- ovs_vport_receive(vport, skb, &tun_info);
-}
+static struct vport_ops ovs_vxlan_netdev_vport_ops;
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
{
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+ __be16 dst_port = vxlan->cfg.dst_port;
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
return -EMSGSIZE;
- if (vxlan_port->exts) {
+ if (vxlan->flags & VXLAN_F_GBP) {
struct nlattr *exts;
exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
if (!exts)
return -EMSGSIZE;
- if (vxlan_port->exts & VXLAN_F_GBP &&
+ if (vxlan->flags & VXLAN_F_GBP &&
nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
return -EMSGSIZE;
@@ -114,23 +57,14 @@ static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
return 0;
}
-static void vxlan_tnl_destroy(struct vport *vport)
-{
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
-
- vxlan_sock_release(vxlan_port->vs);
-
- ovs_vport_deferred_free(vport);
-}
-
-static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
[OVS_VXLAN_EXT_GBP] = { .type = NLA_FLAG, },
};
-static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
+ struct vxlan_config *conf)
{
- struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
- struct vxlan_port *vxlan_port;
+ struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
int err;
if (nla_len(attr) < sizeof(struct nlattr))
@@ -140,10 +74,8 @@ static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
if (err < 0)
return err;
- vxlan_port = vxlan_vport(vport);
-
if (exts[OVS_VXLAN_EXT_GBP])
- vxlan_port->exts |= VXLAN_F_GBP;
+ conf->flags |= VXLAN_F_GBP;
return 0;
}
@@ -152,128 +84,74 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct nlattr *options = parms->options;
- struct vxlan_port *vxlan_port;
- struct vxlan_sock *vs;
+ struct net_device *dev;
struct vport *vport;
struct nlattr *a;
- u16 dst_port;
int err;
+ struct vxlan_config conf = {
+ .no_share = true,
+ .flags = VXLAN_F_COLLECT_METADATA,
+ };
if (!options) {
err = -EINVAL;
goto error;
}
+
a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
if (a && nla_len(a) == sizeof(u16)) {
- dst_port = nla_get_u16(a);
+ conf.dst_port = htons(nla_get_u16(a));
} else {
/* Require destination port from userspace. */
err = -EINVAL;
goto error;
}
- vport = ovs_vport_alloc(sizeof(struct vxlan_port),
- &ovs_vxlan_vport_ops, parms);
+ vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
if (IS_ERR(vport))
return vport;
- vxlan_port = vxlan_vport(vport);
- strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
-
a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
if (a) {
- err = vxlan_configure_exts(vport, a);
+ err = vxlan_configure_exts(vport, a, &conf);
if (err) {
ovs_vport_free(vport);
goto error;
}
}
- vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
- vxlan_port->exts);
- if (IS_ERR(vs)) {
+ rtnl_lock();
+ dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
ovs_vport_free(vport);
- return (void *)vs;
+ return ERR_CAST(dev);
}
- vxlan_port->vs = vs;
+ dev_change_flags(dev, dev->flags | IFF_UP);
+ rtnl_unlock();
return vport;
-
error:
return ERR_PTR(err);
}
-static int vxlan_ext_gbp(struct sk_buff *skb)
+static struct vport *vxlan_create(const struct vport_parms *parms)
{
- const struct ovs_tunnel_info *tun_info;
- const struct ovs_vxlan_opts *opts;
-
- tun_info = OVS_CB(skb)->egress_tun_info;
- opts = tun_info->options;
-
- if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
- tun_info->options_len >= sizeof(*opts))
- return opts->gbp;
- else
- return 0;
-}
-
-static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
- struct net *net = ovs_dp_get_net(vport->dp);
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- struct sock *sk = vxlan_port->vs->sock->sk;
- __be16 dst_port = inet_sk(sk)->inet_sport;
- const struct ovs_key_ipv4_tunnel *tun_key;
- struct vxlan_metadata md = {0};
- struct rtable *rt;
- struct flowi4 fl;
- __be16 src_port;
- __be16 df;
- int err;
- u32 vxflags;
-
- if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
- err = -EINVAL;
- goto error;
- }
-
- tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
- rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
- if (IS_ERR(rt)) {
- err = PTR_ERR(rt);
- goto error;
- }
-
- df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
- htons(IP_DF) : 0;
+ struct vport *vport;
- skb->ignore_df = 1;
+ vport = vxlan_tnl_create(parms);
+ if (IS_ERR(vport))
+ return vport;
- src_port = udp_flow_src_port(net, skb, 0, 0, true);
- md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
- md.gbp = vxlan_ext_gbp(skb);
- vxflags = vxlan_port->exts |
- (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
-
- err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
- tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
- src_port, dst_port,
- &md, false, vxflags);
- if (err < 0)
- ip_rt_put(rt);
- return err;
-error:
- kfree_skb(skb);
- return err;
+ return ovs_netdev_link(vport, parms->name);
}
static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *egress_tun_info)
+ struct ip_tunnel_info *egress_tun_info)
{
+ struct vxlan_dev *vxlan = netdev_priv(vport->dev);
struct net *net = ovs_dp_get_net(vport->dp);
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ __be16 dst_port = vxlan_dev_dst_port(vxlan);
__be16 src_port;
int port_min;
int port_max;
@@ -287,31 +165,23 @@ static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
src_port, dst_port);
}
-static const char *vxlan_get_name(const struct vport *vport)
-{
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- return vxlan_port->name;
-}
-
-static struct vport_ops ovs_vxlan_vport_ops = {
- .type = OVS_VPORT_TYPE_VXLAN,
- .create = vxlan_tnl_create,
- .destroy = vxlan_tnl_destroy,
- .get_name = vxlan_get_name,
- .get_options = vxlan_get_options,
- .send = vxlan_tnl_send,
+static struct vport_ops ovs_vxlan_netdev_vport_ops = {
+ .type = OVS_VPORT_TYPE_VXLAN,
+ .create = vxlan_create,
+ .destroy = ovs_netdev_tunnel_destroy,
+ .get_options = vxlan_get_options,
+ .send = ovs_netdev_send,
.get_egress_tun_info = vxlan_get_egress_tun_info,
- .owner = THIS_MODULE,
};
static int __init ovs_vxlan_tnl_init(void)
{
- return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+ return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
}
static void __exit ovs_vxlan_tnl_exit(void)
{
- ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+ ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
}
module_init(ovs_vxlan_tnl_init);
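[Editor's note: the deleted send/receive paths converted between the 24-bit VXLAN VNI — carried shifted left by 8 in a 32-bit big-endian header field — and the 64-bit tunnel id: md.vni = htonl(be64_to_cpu(tun_id) << 8) on transmit, key = cpu_to_be64(ntohl(vni) >> 8) on receive. A userspace round-trip check of that packing (plain integer types stand in for __be32/__be64):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* 64-bit tunnel id <-> 32-bit on-wire VNI field (VNI << 8). */
static uint32_t tun_id_to_vni_field(uint64_t tun_id)
{
	return htonl((uint32_t)(tun_id << 8));
}

static uint64_t vni_field_to_tun_id(uint32_t vni_field)
{
	return ntohl(vni_field) >> 8;
}

int main(void)
{
	uint64_t id = 0x123456;	/* a 24-bit VNI */

	printf("round trip: %#llx -> %#llx\n",
	       (unsigned long long)id,
	       (unsigned long long)vni_field_to_tun_id(tun_id_to_vni_field(id)));
	return 0;
}

After this patch the conversion lives in the vxlan driver's metadata-collection path rather than in OVS.]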
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
deleted file mode 100644
index 4b08233e73d5..000000000000
--- a/net/openvswitch/vport-vxlan.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef VPORT_VXLAN_H
-#define VPORT_VXLAN_H 1
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct ovs_vxlan_opts {
- __u32 gbp;
-};
-
-#endif
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 067a3fff1d2c..d14f59403c5e 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -113,7 +113,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
struct vport *vport;
hlist_for_each_entry_rcu(vport, bucket, hash_node)
- if (!strcmp(name, vport->ops->get_name(vport)) &&
+ if (!strcmp(name, ovs_vport_name(vport)) &&
net_eq(ovs_dp_get_net(vport->dp), net))
return vport;
@@ -226,7 +226,7 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
}
bucket = hash_bucket(ovs_dp_get_net(vport->dp),
- vport->ops->get_name(vport));
+ ovs_vport_name(vport));
hlist_add_head_rcu(&vport->hash_node, bucket);
return vport;
}
@@ -469,7 +469,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
* skb->data should point to the Ethernet header.
*/
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
- const struct ovs_tunnel_info *tun_info)
+ const struct ip_tunnel_info *tun_info)
{
struct pcpu_sw_netstats *stats;
struct sw_flow_key key;
@@ -572,22 +572,22 @@ void ovs_vport_deferred_free(struct vport *vport)
}
EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
struct net *net,
- const struct ovs_tunnel_info *tun_info,
+ const struct ip_tunnel_info *tun_info,
u8 ipproto,
u32 skb_mark,
__be16 tp_src,
__be16 tp_dst)
{
- const struct ovs_key_ipv4_tunnel *tun_key;
+ const struct ip_tunnel_key *tun_key;
struct rtable *rt;
struct flowi4 fl;
if (unlikely(!tun_info))
return -EINVAL;
- tun_key = &tun_info->tunnel;
+ tun_key = &tun_info->key;
/* Route lookup to get source IP address.
* The process may need to be changed if the corresponding process
@@ -602,22 +602,22 @@ int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
/* Generate egress_tun_info based on tun_info,
* saddr, tp_src and tp_dst
*/
- __ovs_flow_tun_info_init(egress_tun_info,
- fl.saddr, tun_key->ipv4_dst,
- tun_key->ipv4_tos,
- tun_key->ipv4_ttl,
- tp_src, tp_dst,
- tun_key->tun_id,
- tun_key->tun_flags,
- tun_info->options,
- tun_info->options_len);
+ __ip_tunnel_info_init(egress_tun_info,
+ fl.saddr, tun_key->ipv4_dst,
+ tun_key->ipv4_tos,
+ tun_key->ipv4_ttl,
+ tp_src, tp_dst,
+ tun_key->tun_id,
+ tun_key->tun_flags,
+ tun_info->options,
+ tun_info->options_len);
return 0;
}
EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *info)
+ struct ip_tunnel_info *info)
{
/* get_egress_tun_info() is only implemented on tunnel ports. */
if (unlikely(!vport->ops->get_egress_tun_info))
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bc85331a6c60..1a689c28b5a6 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -27,6 +27,7 @@
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/u64_stats_sync.h>
+#include <net/route.h>
#include "datapath.h"
@@ -58,15 +59,15 @@ u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
int ovs_vport_send(struct vport *, struct sk_buff *);
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
struct net *net,
- const struct ovs_tunnel_info *tun_info,
+ const struct ip_tunnel_info *tun_info,
u8 ipproto,
u32 skb_mark,
__be16 tp_src,
__be16 tp_dst);
int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *info);
+ struct ip_tunnel_info *info);
/* The following definitions are for implementers of vport devices: */
@@ -106,7 +107,7 @@ struct vport_portids {
* @detach_list: list used for detaching vport in net-exit call.
*/
struct vport {
- struct rcu_head rcu;
+ struct net_device *dev;
struct datapath *dp;
struct vport_portids __rcu *upcall_portids;
u16 port_no;
@@ -119,6 +120,7 @@ struct vport {
struct vport_err_stats err_stats;
struct list_head detach_list;
+ struct rcu_head rcu;
};
/**
@@ -176,7 +178,7 @@ struct vport_ops {
int (*send)(struct vport *, struct sk_buff *);
int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
- struct ovs_tunnel_info *);
+ struct ip_tunnel_info *);
struct module *owner;
struct list_head list;
@@ -226,7 +228,7 @@ static inline struct vport *vport_from_priv(void *priv)
}
void ovs_vport_receive(struct vport *, struct sk_buff *,
- const struct ovs_tunnel_info *);
+ const struct ip_tunnel_info *);
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
@@ -235,11 +237,16 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
}
+static inline const char *ovs_vport_name(struct vport *vport)
+{
+ return vport->dev ? vport->dev->name : vport->ops->get_name(vport);
+}
+
int ovs_vport_ops_register(struct vport_ops *ops);
void ovs_vport_ops_unregister(struct vport_ops *ops);
static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
- const struct ovs_key_ipv4_tunnel *key,
+ const struct ip_tunnel_key *key,
u32 mark,
struct flowi4 *fl,
u8 protocol)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ed458b315ef4..b5afe538bb88 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -518,13 +518,11 @@ static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
}
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
- int tx_ring,
struct sk_buff_head *rb_queue)
{
struct tpacket_kbdq_core *pkc;
- pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
- GET_PBDQC_FROM_RB(&po->rx_ring);
+ pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
spin_lock_bh(&rb_queue->lock);
pkc->delete_blk_timer = 1;
@@ -4043,7 +4041,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (closing && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
- prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+ prb_shutdown_retire_blk_timer(po, rb_queue);
}
release_sock(sk);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 4ebd29c128b6..dd666fb9b4e1 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -185,7 +185,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
ret = 0;
goto out;
}
- trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
+ trans = rds_trans_get_preferred(sock_net(sock->sk),
+ sin->sin_addr.s_addr);
if (!trans) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index da6da57e5f36..d4fecb21ca25 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -117,7 +117,8 @@ static void rds_conn_reset(struct rds_connection *conn)
* For now they are not garbage collected once they're created. They
* are torn down as the module is removed, if ever.
*/
-static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
+static struct rds_connection *__rds_conn_create(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp,
int is_outgoing)
{
@@ -157,6 +158,7 @@ new_conn:
conn->c_faddr = faddr;
spin_lock_init(&conn->c_lock);
conn->c_next_tx_seq = 1;
+ rds_conn_net_set(conn, net);
init_waitqueue_head(&conn->c_waitq);
INIT_LIST_HEAD(&conn->c_send_queue);
@@ -174,7 +176,7 @@ new_conn:
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
- loop_trans = rds_trans_get_preferred(faddr);
+ loop_trans = rds_trans_get_preferred(net, faddr);
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
@@ -260,17 +262,19 @@ out:
return conn;
}
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp)
{
- return __rds_conn_create(laddr, faddr, trans, gfp, 0);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp)
{
- return __rds_conn_create(laddr, faddr, trans, gfp, 1);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ba2dffeff608..13814227b3b2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -317,7 +317,7 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len,
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
-static int rds_ib_laddr_check(__be32 addr)
+static int rds_ib_laddr_check(struct net *net, __be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 0da2a45b33bd..f40d8f52b753 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -448,8 +448,9 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
(unsigned long long)be64_to_cpu(lguid),
(unsigned long long)be64_to_cpu(fguid));
- conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
- GFP_KERNEL);
+ /* RDS/IB is not currently netns aware, thus init_net */
+ conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+ &rds_ib_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 589935661d66..5d5a9d258658 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -218,7 +218,7 @@ static void rds_iw_ic_info(struct socket *sock, unsigned int len,
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
-static int rds_iw_laddr_check(__be32 addr)
+static int rds_iw_laddr_check(struct net *net, __be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 8f486fa32079..a6553a6fb2bc 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -398,8 +398,9 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
&dp->dp_saddr, &dp->dp_daddr,
RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
- conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport,
- GFP_KERNEL);
+ /* RDS/IW is not currently netns aware, thus init_net */
+ conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+ &rds_iw_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 2260c1e434b1..9005fb0586f6 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -128,8 +128,21 @@ struct rds_connection {
/* Protocol version */
unsigned int c_version;
+ possible_net_t c_net;
};
+static inline
+struct net *rds_conn_net(struct rds_connection *conn)
+{
+ return read_pnet(&conn->c_net);
+}
+
+static inline
+void rds_conn_net_set(struct rds_connection *conn, struct net *net)
+{
+ write_pnet(&conn->c_net, net);
+}
+
#define RDS_FLAG_CONG_BITMAP 0x01
#define RDS_FLAG_ACK_REQUIRED 0x02
#define RDS_FLAG_RETRANSMITTED 0x04
@@ -417,7 +430,7 @@ struct rds_transport {
unsigned int t_prefer_loopback:1;
unsigned int t_type;
- int (*laddr_check)(__be32 addr);
+ int (*laddr_check)(struct net *net, __be32 addr);
int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
void (*conn_free)(void *data);
int (*conn_connect)(struct rds_connection *conn);
@@ -608,9 +621,11 @@ struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
/* conn.c */
int rds_conn_init(void);
void rds_conn_exit(void);
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
@@ -795,7 +810,7 @@ void rds_connect_complete(struct rds_connection *conn);
/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
-struct rds_transport *rds_trans_get_preferred(__be32 addr);
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
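[Editor's note: possible_net_t plus read_pnet()/write_pnet() let struct rds_connection record its namespace at zero cost when namespaces are compiled out — with CONFIG_NET_NS the wrapper holds a real pointer, without it the accessors collapse to &init_net. A compile-time model of that mechanism (struct net here is a dummy; the empty-struct branch mirrors the kernel's definition and relies on the zero-size-struct GCC extension):

#include <stdio.h>

struct net { const char *name; };
static struct net init_net = { "init_net" };

#ifdef CONFIG_NET_NS
typedef struct { struct net *net; } possible_net_t;
static void write_pnet(possible_net_t *p, struct net *net) { p->net = net; }
static struct net *read_pnet(const possible_net_t *p) { return p->net; }
#else
typedef struct { } possible_net_t;	/* zero bytes when namespaces are off */
static void write_pnet(possible_net_t *p, struct net *net) { (void)p; (void)net; }
static struct net *read_pnet(const possible_net_t *p) { (void)p; return &init_net; }
#endif

struct conn { possible_net_t c_net; };	/* as rds_connection.c_net above */

int main(void)
{
	struct conn c;

	write_pnet(&c.c_net, &init_net);
	printf("conn lives in %s (wrapper is %zu bytes)\n",
	       read_pnet(&c.c_net)->name, sizeof(possible_net_t));
	return 0;
}
]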
diff --git a/net/rds/send.c b/net/rds/send.c
index e9430f537f9c..2581b8e3dbe7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1023,7 +1023,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
conn = rs->rs_conn;
else {
- conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
+ conn = rds_conn_create_outgoing(sock_net(sock->sk),
+ rs->rs_bound_addr, daddr,
rs->rs_transport,
sock->sk->sk_allocation);
if (IS_ERR(conn)) {
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index edac9ef2bc8b..c42b60bf4c68 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -35,6 +35,9 @@
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/tcp.h>
#include "rds.h"
#include "tcp.h"
@@ -189,9 +192,9 @@ out:
spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
-static int rds_tcp_laddr_check(__be32 addr)
+static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
- if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
+ if (inet_addr_type(net, addr) == RTN_LOCAL)
return 0;
return -EADDRNOTAVAIL;
}
@@ -250,16 +253,7 @@ static void rds_tcp_destroy_conns(void)
}
}
-static void rds_tcp_exit(void)
-{
- rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
- rds_tcp_listen_stop();
- rds_tcp_destroy_conns();
- rds_trans_unregister(&rds_tcp_transport);
- rds_tcp_recv_exit();
- kmem_cache_destroy(rds_tcp_conn_slab);
-}
-module_exit(rds_tcp_exit);
+static void rds_tcp_exit(void);
struct rds_transport rds_tcp_transport = {
.laddr_check = rds_tcp_laddr_check,
@@ -281,6 +275,136 @@ struct rds_transport rds_tcp_transport = {
.t_prefer_loopback = 1,
};
+static int rds_tcp_netid;
+
+/* per-network namespace private data for this module */
+struct rds_tcp_net {
+ struct socket *rds_tcp_listen_sock;
+ struct work_struct rds_tcp_accept_w;
+};
+
+static void rds_tcp_accept_worker(struct work_struct *work)
+{
+ struct rds_tcp_net *rtn = container_of(work,
+ struct rds_tcp_net,
+ rds_tcp_accept_w);
+
+ while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
+ cond_resched();
+}
+
+void rds_tcp_accept_work(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ queue_work(rds_wq, &rtn->rds_tcp_accept_w);
+}
+
+static __net_init int rds_tcp_init_net(struct net *net)
+{
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
+ if (!rtn->rds_tcp_listen_sock) {
+ pr_warn("could not set up listen sock\n");
+ return -EAFNOSUPPORT;
+ }
+ INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
+ return 0;
+}
+
+static void __net_exit rds_tcp_exit_net(struct net *net)
+{
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ /* If rds_tcp_exit_net() is called as a result of netns deletion,
+ * the rds_tcp_kill_sock() device notifier would already have cleaned
+ * up the listen socket, thus there is no work to do in this function.
+ *
+ * If rds_tcp_exit_net() is called as a result of module unload,
+ * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
+ * we do need to clean up the listen socket here.
+ */
+ if (rtn->rds_tcp_listen_sock) {
+ rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+ rtn->rds_tcp_listen_sock = NULL;
+ flush_work(&rtn->rds_tcp_accept_w);
+ }
+}
+
+static struct pernet_operations rds_tcp_net_ops = {
+ .init = rds_tcp_init_net,
+ .exit = rds_tcp_exit_net,
+ .id = &rds_tcp_netid,
+ .size = sizeof(struct rds_tcp_net),
+};
+
+static void rds_tcp_kill_sock(struct net *net)
+{
+ struct rds_tcp_connection *tc, *_tc;
+ struct sock *sk;
+ LIST_HEAD(tmp_list);
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+ rtn->rds_tcp_listen_sock = NULL;
+ flush_work(&rtn->rds_tcp_accept_w);
+ spin_lock_irq(&rds_tcp_conn_lock);
+ list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ struct net *c_net = read_pnet(&tc->conn->c_net);
+
+ if (net != c_net || !tc->t_sock)
+ continue;
+ list_move_tail(&tc->t_tcp_node, &tmp_list);
+ }
+ spin_unlock_irq(&rds_tcp_conn_lock);
+ list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
+ sk = tc->t_sock->sk;
+ sk->sk_prot->disconnect(sk, 0);
+ tcp_done(sk);
+ if (tc->conn->c_passive)
+ rds_conn_destroy(tc->conn->c_passive);
+ rds_conn_destroy(tc->conn);
+ }
+}
+
+static int rds_tcp_dev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ /* rds-tcp registers as a pernet subsys, so the ->exit will only
+ * get invoked after network activity has quiesced. We need to
+ * clean up all sockets to quiesce network activity, and use
+ * the unregistration of the per-net loopback device as a trigger
+ * to start that cleanup.
+ */
+ if (event == NETDEV_UNREGISTER_FINAL &&
+ dev->ifindex == LOOPBACK_IFINDEX)
+ rds_tcp_kill_sock(dev_net(dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rds_tcp_dev_notifier = {
+ .notifier_call = rds_tcp_dev_event,
+ .priority = -10, /* must be called after other network notifiers */
+};
+
+static void rds_tcp_exit(void)
+{
+ rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+ unregister_pernet_subsys(&rds_tcp_net_ops);
+ if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+ pr_warn("could not unregister rds_tcp_dev_notifier\n");
+ rds_tcp_destroy_conns();
+ rds_trans_unregister(&rds_tcp_transport);
+ rds_tcp_recv_exit();
+ kmem_cache_destroy(rds_tcp_conn_slab);
+}
+module_exit(rds_tcp_exit);
+
static int rds_tcp_init(void)
{
int ret;
@@ -293,6 +417,16 @@ static int rds_tcp_init(void)
goto out;
}
+ ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+ if (ret) {
+ pr_warn("could not register rds_tcp_dev_notifier\n");
+ goto out;
+ }
+
+ ret = register_pernet_subsys(&rds_tcp_net_ops);
+ if (ret)
+ goto out_slab;
+
ret = rds_tcp_recv_init();
if (ret)
goto out_slab;
@@ -301,19 +435,14 @@ static int rds_tcp_init(void)
if (ret)
goto out_recv;
- ret = rds_tcp_listen_init();
- if (ret)
- goto out_register;
-
rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
goto out;
-out_register:
- rds_trans_unregister(&rds_tcp_transport);
out_recv:
rds_tcp_recv_exit();
out_slab:
+ unregister_pernet_subsys(&rds_tcp_net_ops);
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
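[Editor's note: register_pernet_subsys() is the core of the netns conversion above — each network namespace gets its own struct rds_tcp_net (one listen socket per netns), located via net_generic(net, rds_tcp_netid); ->init runs when a namespace appears, ->exit on teardown. A minimal single-namespace model of the id-indexed per-net storage (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MAX_PERNET 8

struct net { void *gen[MAX_PERNET]; };	/* per-netns private slots */

static int next_id;

/* Model of register_pernet_subsys(): hand out a slot id, allocate the
 * per-net blob, and run ->init for the (single) namespace. */
static int register_pernet(struct net *net, size_t size,
			   int (*init)(struct net *), int *id)
{
	*id = next_id++;
	net->gen[*id] = calloc(1, size);
	return net->gen[*id] ? init(net) : -1;
}

static void *net_generic(struct net *net, int id) { return net->gen[id]; }

/* Per-namespace private data, as in struct rds_tcp_net above. */
struct tcp_net { int listen_fd; };
static int tcp_netid;

static int tcp_init_net(struct net *net)
{
	struct tcp_net *tn = net_generic(net, tcp_netid);

	tn->listen_fd = 42;	/* stand-in for rds_tcp_listen_init() */
	return 0;
}

int main(void)
{
	struct net ns = { { 0 } };

	if (register_pernet(&ns, sizeof(struct tcp_net), tcp_init_net, &tcp_netid))
		return 1;
	printf("listen fd in this netns: %d\n",
	       ((struct tcp_net *)net_generic(&ns, tcp_netid))->listen_fd);
	return 0;
}
]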
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 0dbdd37162da..64f873c0c6b6 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -52,6 +52,7 @@ u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
+void rds_tcp_accept_work(struct sock *sk);
/* tcp_connect.c */
int rds_tcp_conn_connect(struct rds_connection *conn);
@@ -59,9 +60,11 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn);
void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
-int rds_tcp_listen_init(void);
-void rds_tcp_listen_stop(void);
+struct socket *rds_tcp_listen_init(struct net *);
+void rds_tcp_listen_stop(struct socket *);
void rds_tcp_listen_data_ready(struct sock *sk);
+int rds_tcp_accept_one(struct socket *sock);
+int rds_tcp_keepalive(struct socket *sock);
/* tcp_recv.c */
int rds_tcp_recv_init(void);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 973109c7b8e8..5cb16875c460 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -79,7 +79,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
struct sockaddr_in src, dest;
int ret;
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ ret = sock_create_kern(rds_conn_net(conn), PF_INET,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0)
goto out;
@@ -111,10 +112,12 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
ret = 0;
- if (ret == 0)
+ if (ret == 0) {
+ rds_tcp_keepalive(sock);
sock = NULL;
- else
+ } else {
rds_tcp_restore_callbacks(sock, conn->c_transport_data);
+ }
out:
if (sock)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0da49e34495f..444d78d0bd77 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -38,14 +38,7 @@
#include "rds.h"
#include "tcp.h"
-/*
- * cheesy, but simple..
- */
-static void rds_tcp_accept_worker(struct work_struct *work);
-static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
-static struct socket *rds_tcp_listen_sock;
-
-static int rds_tcp_keepalive(struct socket *sock)
+int rds_tcp_keepalive(struct socket *sock)
{
/* values below based on xs_udp_default_timeout */
int keepidle = 5; /* send a probe 'keepidle' secs after last data */
@@ -77,7 +70,7 @@ bail:
return ret;
}
-static int rds_tcp_accept_one(struct socket *sock)
+int rds_tcp_accept_one(struct socket *sock)
{
struct socket *new_sock = NULL;
struct rds_connection *conn;
@@ -85,8 +78,9 @@ static int rds_tcp_accept_one(struct socket *sock)
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp;
- ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
- sock->sk->sk_protocol, &new_sock);
+ ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+ sock->sk->sk_type, sock->sk->sk_protocol,
+ &new_sock);
if (ret)
goto out;
@@ -108,7 +102,8 @@ static int rds_tcp_accept_one(struct socket *sock)
&inet->inet_saddr, ntohs(inet->inet_sport),
&inet->inet_daddr, ntohs(inet->inet_dport));
- conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
+ conn = rds_conn_create(sock_net(sock->sk),
+ inet->inet_saddr, inet->inet_daddr,
&rds_tcp_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
@@ -148,12 +143,6 @@ out:
return ret;
}
-static void rds_tcp_accept_worker(struct work_struct *work)
-{
- while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
- cond_resched();
-}
-
void rds_tcp_listen_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
@@ -174,20 +163,20 @@ void rds_tcp_listen_data_ready(struct sock *sk)
* socket
*/
if (sk->sk_state == TCP_LISTEN)
- queue_work(rds_wq, &rds_tcp_listen_work);
+ rds_tcp_accept_work(sk);
out:
read_unlock(&sk->sk_callback_lock);
ready(sk);
}
-int rds_tcp_listen_init(void)
+struct socket *rds_tcp_listen_init(struct net *net)
{
struct sockaddr_in sin;
struct socket *sock = NULL;
int ret;
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0)
goto out;
@@ -211,17 +200,15 @@ int rds_tcp_listen_init(void)
if (ret < 0)
goto out;
- rds_tcp_listen_sock = sock;
- sock = NULL;
+ return sock;
out:
if (sock)
sock_release(sock);
- return ret;
+ return NULL;
}
-void rds_tcp_listen_stop(void)
+void rds_tcp_listen_stop(struct socket *sock)
{
- struct socket *sock = rds_tcp_listen_sock;
struct sock *sk;
if (!sock)
@@ -242,5 +229,4 @@ void rds_tcp_listen_stop(void)
/* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq);
sock_release(sock);
- rds_tcp_listen_sock = NULL;
}
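[Editor's note: rds_tcp_keepalive() — now also applied on the active-connect path in tcp_connect.c above — enables SO_KEEPALIVE with aggressive timers (keepidle of 5 s per its comment). A userspace equivalent of the same knobs; the probe-count and interval values shown here follow the helper's comments but should be treated as illustrative:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;
	int keepidle = 5;	/* send a probe 'keepidle' secs after last data */
	int keepcnt = 5;	/* unacked probes before the connection is reset */

	if (fd < 0)
		return 1;
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &keepcnt, sizeof(keepcnt));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepidle, sizeof(keepidle));
	/* idle + interval * count bounds dead-peer detection time; the
	 * interval here reuses keepidle, as the in-kernel helper does. */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &keepidle, sizeof(keepidle));

	printf("keepalive configured on fd %d\n", fd);
	close(fd);
	return 0;
}
]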
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 83498e1c75b8..f3afd1d60d3c 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -77,7 +77,7 @@ void rds_trans_put(struct rds_transport *trans)
module_put(trans->t_owner);
}
-struct rds_transport *rds_trans_get_preferred(__be32 addr)
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr)
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
@@ -90,7 +90,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr)
for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
- if (trans && (trans->laddr_check(addr) == 0) &&
+ if (trans && (trans->laddr_check(net, addr) == 0) &&
(!trans->t_owner || try_module_get(trans->t_owner))) {
ret = trans;
break;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 43ec92680ae8..b087087ccfa9 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,6 +27,15 @@
#include <net/act_api.h>
#include <net/netlink.h>
+static void free_tcf(struct rcu_head *head)
+{
+ struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+ free_percpu(p->cpu_bstats);
+ free_percpu(p->cpu_qstats);
+ kfree(p);
+}
+
void tcf_hash_destroy(struct tc_action *a)
{
struct tcf_common *p = a->priv;
@@ -41,7 +50,7 @@ void tcf_hash_destroy(struct tc_action *a)
* gen_estimator est_timer() might access p->tcfc_lock
* or bstats, wait a RCU grace period before freeing p
*/
- kfree_rcu(p, tcfc_rcu);
+ call_rcu(&p->tcfc_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_destroy);
@@ -231,15 +240,16 @@ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
if (est)
gen_kill_estimator(&pc->tcfc_bstats,
&pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ call_rcu(&pc->tcfc_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_cleanup);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
- int size, int bind)
+ int size, int bind, bool cpustats)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+ int err = -ENOMEM;
if (unlikely(!p))
return -ENOMEM;
@@ -247,18 +257,32 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
if (bind)
p->tcfc_bindcnt = 1;
+ if (cpustats) {
+ p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+ if (!p->cpu_bstats) {
+err1:
+ kfree(p);
+ return err;
+ }
+ p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+ if (!p->cpu_qstats) {
+err2:
+ free_percpu(p->cpu_bstats);
+ goto err1;
+ }
+ }
spin_lock_init(&p->tcfc_lock);
INIT_HLIST_NODE(&p->tcfc_head);
p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
if (est) {
- int err = gen_new_estimator(&p->tcfc_bstats, NULL,
- &p->tcfc_rate_est,
- &p->tcfc_lock, est);
+ err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+ &p->tcfc_rate_est,
+ &p->tcfc_lock, est);
if (err) {
- kfree(p);
- return err;
+ free_percpu(p->cpu_qstats);
+ goto err2;
}
}
@@ -616,10 +640,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (err < 0)
goto errout;
- if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+ if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 ||
- gnet_stats_copy_queue(&d, NULL,
+ gnet_stats_copy_queue(&d, p->cpu_qstats,
&p->tcfc_qstats,
p->tcfc_qstats.qlen) < 0)
goto errout;
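[Editor's note: the cpustats option added to tcf_hash_create() above gives each action per-CPU byte/packet and queue counters, so the datapath can update statistics without taking the per-action spinlock; readers sum every CPU's slot, which is what gnet_stats_copy_basic()/copy_queue() do when handed cpu_bstats/cpu_qstats. A model of that split with an array standing in for real per-CPU storage:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

/* Each CPU owns a slot, so the fast path bumps counters lock-free;
 * the slow (dump) path sums all slots. */
struct bstats { uint64_t bytes, packets; };

static struct bstats cpu_bstats[NR_CPUS];

static void bstats_update(int cpu, unsigned int pkt_len)
{
	cpu_bstats[cpu].bytes += pkt_len;	/* no lock: slot is CPU-local */
	cpu_bstats[cpu].packets++;
}

int main(void)
{
	struct bstats sum = { 0, 0 };
	int cpu;

	bstats_update(0, 1500);
	bstats_update(1, 60);
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		sum.bytes += cpu_bstats[cpu].bytes;
		sum.packets += cpu_bstats[cpu].packets;
	}
	printf("%llu bytes / %llu packets\n",
	       (unsigned long long)sum.bytes,
	       (unsigned long long)sum.packets);
	return 0;
}
]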
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d0edeb7a1950..1b97dabc621a 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -278,7 +278,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct tc_act_bpf *parm;
struct tcf_bpf *prog;
bool is_bpf, is_ebpf;
- int ret;
+ int ret, res = 0;
if (!nla)
return -EINVAL;
@@ -287,41 +287,43 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
if (ret < 0)
return ret;
- is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
- is_ebpf = tb[TCA_ACT_BPF_FD];
-
- if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
- !tb[TCA_ACT_BPF_PARMS])
+ if (!tb[TCA_ACT_BPF_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
- memset(&cfg, 0, sizeof(cfg));
-
- ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
- tcf_bpf_init_from_efd(tb, &cfg);
- if (ret < 0)
- return ret;
-
if (!tcf_hash_check(parm->index, act, bind)) {
ret = tcf_hash_create(parm->index, est, act,
- sizeof(*prog), bind);
+ sizeof(*prog), bind, false);
if (ret < 0)
- goto destroy_fp;
+ return ret;
- ret = ACT_P_CREATED;
+ res = ACT_P_CREATED;
} else {
/* Don't override defaults. */
if (bind)
- goto destroy_fp;
+ return 0;
tcf_hash_release(act, bind);
- if (!replace) {
- ret = -EEXIST;
- goto destroy_fp;
- }
+ if (!replace)
+ return -EEXIST;
}
+ is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+ is_ebpf = tb[TCA_ACT_BPF_FD];
+
+ if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
+ tcf_bpf_init_from_efd(tb, &cfg);
+ if (ret < 0)
+ goto out;
+
prog = to_bpf(act);
spin_lock_bh(&prog->tcf_lock);
@@ -341,15 +343,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
spin_unlock_bh(&prog->tcf_lock);
- if (ret == ACT_P_CREATED)
+ if (res == ACT_P_CREATED)
tcf_hash_insert(act);
else
tcf_bpf_cfg_cleanup(&old);
- return ret;
+ return res;
+out:
+ if (res == ACT_P_CREATED)
+ tcf_hash_cleanup(act, est);
-destroy_fp:
- tcf_bpf_cfg_cleanup(&cfg);
return ret;
}
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 295d14bd6c67..f2b540220ad0 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -108,7 +108,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 4cd5cf1aedf8..b07c535ba8e7 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -62,7 +62,8 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
parm = nla_data(tb[TCA_CSUM_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+ bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 7fffc2272701..5c1b05170736 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -28,14 +28,18 @@
#ifdef CONFIG_GACT_PROB
static int gact_net_rand(struct tcf_gact *gact)
{
- if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
+ smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+ if (prandom_u32() % gact->tcfg_pval)
return gact->tcf_action;
return gact->tcfg_paction;
}
static int gact_determ(struct tcf_gact *gact)
{
- if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
+ u32 pack = atomic_inc_return(&gact->packets);
+
+ smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+ if (pack % gact->tcfg_pval)
return gact->tcf_action;
return gact->tcfg_paction;
}
@@ -85,7 +89,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
#endif
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+ bind, true);
if (ret)
return ret;
ret = ACT_P_CREATED;
@@ -99,16 +104,19 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
gact = to_gact(a);
- spin_lock_bh(&gact->tcf_lock);
+ ASSERT_RTNL();
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
if (p_parm) {
gact->tcfg_paction = p_parm->paction;
- gact->tcfg_pval = p_parm->pval;
+ gact->tcfg_pval = max_t(u16, 1, p_parm->pval);
+ /* Make sure tcfg_pval is written before tcfg_ptype
+ * coupled with smp_rmb() in gact_net_rand() & gact_determ()
+ */
+ smp_wmb();
gact->tcfg_ptype = p_parm->ptype;
}
#endif
- spin_unlock_bh(&gact->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_hash_insert(a);
return ret;
@@ -118,23 +126,21 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_gact *gact = a->priv;
- int action = TC_ACT_SHOT;
+ int action = READ_ONCE(gact->tcf_action);
- spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
- if (gact->tcfg_ptype)
- action = gact_rand[gact->tcfg_ptype](gact);
- else
- action = gact->tcf_action;
-#else
- action = gact->tcf_action;
+ {
+ u32 ptype = READ_ONCE(gact->tcfg_ptype);
+
+ if (ptype)
+ action = gact_rand[ptype](gact);
+ }
#endif
- gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
- gact->tcf_bstats.packets++;
+ bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
if (action == TC_ACT_SHOT)
- gact->tcf_qstats.drops++;
- gact->tcf_tm.lastuse = jiffies;
- spin_unlock(&gact->tcf_lock);
+ qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+
+ tcf_lastuse_update(&gact->tcf_tm);
return action;
}
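[Editor's note: the gact conversion above drops the per-packet spinlock by publishing the probability parameters with a barrier pair — the writer stores tcfg_pval (forced to at least 1), issues smp_wmb(), then stores tcfg_ptype; readers load ptype, smp_rmb(), then pval, so any reader that sees the new ptype also sees a non-zero pval and the '% pval' cannot divide by zero. The same ordering expressed with C11 fences (a model, not the kernel primitives):

#include <stdatomic.h>
#include <stdio.h>

static unsigned int pval;	/* plain store, ordered by the fences */
static atomic_uint ptype;

static void writer(unsigned int val, unsigned int type)
{
	pval = val ? val : 1;				/* max_t(u16, 1, pval) */
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&ptype, type, memory_order_relaxed);
}

static unsigned int reader(void)
{
	unsigned int type = atomic_load_explicit(&ptype, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	/* If the new type is visible, the matching pval (>= 1) is too. */
	return type ? pval : 0;
}

int main(void)
{
	writer(10, 1);
	printf("reader sees pval=%u\n", reader());
	return 0;
}
]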
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index cbc8dd7dd48a..99c9cc1c7af9 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -114,7 +114,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
index = nla_get_u32(tb[TCA_IPT_INDEX]);
if (!tcf_hash_check(index, a, bind) ) {
- ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+ ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 268545050ddb..2d1be4a760fd 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -35,9 +35,11 @@ static LIST_HEAD(mirred_list);
static void tcf_mirred_release(struct tc_action *a, int bind)
{
struct tcf_mirred *m = to_mirred(a);
+ struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+
list_del(&m->tcfm_list);
- if (m->tcfm_dev)
- dev_put(m->tcfm_dev);
+ if (dev)
+ dev_put(dev);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -93,7 +95,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
if (!tcf_hash_check(parm->index, a, bind)) {
if (dev == NULL)
return -EINVAL;
- ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
+ bind, true);
if (ret)
return ret;
ret = ACT_P_CREATED;
@@ -107,18 +110,18 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
}
m = to_mirred(a);
- spin_lock_bh(&m->tcf_lock);
+ ASSERT_RTNL();
m->tcf_action = parm->action;
m->tcfm_eaction = parm->eaction;
if (dev != NULL) {
m->tcfm_ifindex = parm->ifindex;
if (ret != ACT_P_CREATED)
- dev_put(m->tcfm_dev);
+ dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
dev_hold(dev);
- m->tcfm_dev = dev;
+ rcu_assign_pointer(m->tcfm_dev, dev);
m->tcfm_ok_push = ok_push;
}
- spin_unlock_bh(&m->tcf_lock);
+
if (ret == ACT_P_CREATED) {
list_add(&m->tcfm_list, &mirred_list);
tcf_hash_insert(a);
@@ -133,20 +136,22 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
struct tcf_mirred *m = a->priv;
struct net_device *dev;
struct sk_buff *skb2;
+ int retval, err;
u32 at;
- int retval, err = 1;
- spin_lock(&m->tcf_lock);
- m->tcf_tm.lastuse = jiffies;
- bstats_update(&m->tcf_bstats, skb);
+ tcf_lastuse_update(&m->tcf_tm);
+
+ bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
- dev = m->tcfm_dev;
- if (!dev) {
- printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+ rcu_read_lock();
+ retval = READ_ONCE(m->tcf_action);
+ dev = rcu_dereference(m->tcfm_dev);
+ if (unlikely(!dev)) {
+ pr_notice_once("tc mirred: target device is gone\n");
goto out;
}
- if (!(dev->flags & IFF_UP)) {
+ if (unlikely(!(dev->flags & IFF_UP))) {
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
dev->name);
goto out;
@@ -154,7 +159,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
at = G_TC_AT(skb->tc_verd);
skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
+ if (!skb2)
goto out;
if (!(at & AT_EGRESS)) {
@@ -170,16 +175,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
skb2->dev = dev;
err = dev_queue_xmit(skb2);
-out:
if (err) {
- m->tcf_qstats.overlimits++;
+out:
+ qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
retval = TC_ACT_SHOT;
- else
- retval = m->tcf_action;
- } else
- retval = m->tcf_action;
- spin_unlock(&m->tcf_lock);
+ }
+ rcu_read_unlock();
return retval;
}
@@ -218,14 +220,16 @@ static int mirred_device_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct tcf_mirred *m;
+ ASSERT_RTNL();
if (event == NETDEV_UNREGISTER)
list_for_each_entry(m, &mirred_list, tcfm_list) {
- spin_lock_bh(&m->tcf_lock);
- if (m->tcfm_dev == dev) {
+ if (rcu_access_pointer(m->tcfm_dev) == dev) {
dev_put(dev);
- m->tcfm_dev = NULL;
+ /* Note: no RCU grace period is necessary, as
+ * net_device objects are already RCU protected.
+ */
+ RCU_INIT_POINTER(m->tcfm_dev, NULL);
}
- spin_unlock_bh(&m->tcf_lock);
}
return NOTIFY_DONE;
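
The mirred conversion follows the standard RCU pointer handoff: the control path (under RTNL) publishes the device with rcu_assign_pointer(), the datapath reads it under rcu_read_lock() with rcu_dereference(), and the unregister notifier can simply NULL it with RCU_INIT_POINTER() because net_device freeing is already RCU-deferred. A compilable userspace analogue of the same handoff, with invented names and C11 atomics standing in for the kernel primitives:

	#include <stdatomic.h>
	#include <stddef.h>

	struct dev { int ifindex; };

	static _Atomic(struct dev *) tcfm_dev;

	static void update(struct dev *d)	/* control path, serialized by caller */
	{
		atomic_store_explicit(&tcfm_dev, d, memory_order_release);
	}

	static int datapath(void)		/* hot path, lockless */
	{
		struct dev *d = atomic_load_explicit(&tcfm_dev, memory_order_consume);

		return d ? d->ifindex : -1;	/* mirrors the "device is gone" check */
	}

	static void unregister(struct dev *d)	/* notifier path */
	{
		if (atomic_load_explicit(&tcfm_dev, memory_order_relaxed) == d)
			atomic_store_explicit(&tcfm_dev, NULL, memory_order_relaxed);
	}

	int main(void)
	{
		struct dev eth0 = { .ifindex = 2 };

		update(&eth0);
		unregister(&eth0);
		return datapath() == -1 ? 0 : 1;
	}
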
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 270a030d5fd0..5be0b3c1c5b0 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -55,7 +55,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
parm = nla_data(tb[TCA_NAT_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+ bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ff8b466a73f6..e38a7701f154 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -57,7 +57,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (!tcf_hash_check(parm->index, a, bind)) {
if (!parm->nkeys)
return -EINVAL;
- ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+ bind, false);
if (ret)
return ret;
p = to_pedit(a);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 6a8d9488613a..d6b708d6afdf 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -103,7 +103,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
defdata = nla_data(tb[TCA_DEF_DATA]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index fcfeeaf838be..6751b5f8c046 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,7 +99,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index d735ecf0b1a7..796785e0bf96 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,7 +116,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
action = parm->v_action;
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index ea611b216412..4c85bd3a750c 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
- u32 classid;
-
- classid = task_cls_state(current)->classid;
-
- /*
- * Due to the nature of the classifier it is required to ignore all
- * packets originating from softirq context as accessing `current'
- * would lead to false results.
- *
- * This test assumes that all callers of dev_queue_xmit() explicitly
- * disable bh. Knowing this, it is possible to detect softirq based
- * calls by looking at the number of nested bh disable calls because
- * softirqs always disables bh.
- */
- if (in_serving_softirq()) {
- /* If there is an sk_classid we'll use that. */
- if (!skb->sk)
- return -1;
- classid = skb->sk->sk_classid;
- }
+ u32 classid = task_get_classid(skb);
if (!classid)
return -1;
-
if (!tcf_em_tree_match(skb, &head->ematches, NULL))
return -1;
res->classid = classid;
res->class = 0;
+
return tcf_exts_exec(skb, &head->exts, res);
}
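
The deleted softirq special-casing has evidently been folded into the new task_get_classid() helper. Reconstructed from the removed lines, the helper presumably behaves roughly like this (a sketch, not the exact in-tree definition):

	static inline u32 task_get_classid(const struct sk_buff *skb)
	{
		u32 classid = task_cls_state(current)->classid;

		/* In softirq context 'current' is unrelated to the packet,
		 * so fall back to the socket's classid, if any.
		 */
		if (in_serving_softirq()) {
			if (!skb->sk)
				return 0;
			classid = skb->sk->sk_classid;
		}
		return classid;
	}

Returning 0 for the no-socket case lets the caller's `if (!classid) return -1;` preserve the old behavior.
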
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b8d73bca683c..ffaeea63d473 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -186,7 +186,6 @@ struct qfq_sched {
u64 oldV, V; /* Precise virtual times. */
struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
- u32 num_active_agg; /* Num. of active aggregates */
u32 wsum; /* weight sum */
u32 iwsum; /* inverse weight sum */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 59e80356672b..4345790ad326 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -487,23 +487,35 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
*/
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ struct net_device *odev;
+
if (!laddr->valid)
continue;
- if ((laddr->state == SCTP_ADDR_SRC) &&
- (AF_INET == laddr->a.sa.sa_family)) {
- fl4->fl4_sport = laddr->a.v4.sin_port;
- flowi4_update_output(fl4,
- asoc->base.sk->sk_bound_dev_if,
- RT_CONN_FLAGS(asoc->base.sk),
- daddr->v4.sin_addr.s_addr,
- laddr->a.v4.sin_addr.s_addr);
-
- rt = ip_route_output_key(sock_net(sk), fl4);
- if (!IS_ERR(rt)) {
- dst = &rt->dst;
- goto out_unlock;
- }
- }
+ if (laddr->state != SCTP_ADDR_SRC ||
+ AF_INET != laddr->a.sa.sa_family)
+ continue;
+
+ fl4->fl4_sport = laddr->a.v4.sin_port;
+ flowi4_update_output(fl4,
+ asoc->base.sk->sk_bound_dev_if,
+ RT_CONN_FLAGS(asoc->base.sk),
+ daddr->v4.sin_addr.s_addr,
+ laddr->a.v4.sin_addr.s_addr);
+
+ rt = ip_route_output_key(sock_net(sk), fl4);
+ if (IS_ERR(rt))
+ continue;
+
+ /* Ensure the src address belongs to the output
+ * interface.
+ */
+ odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+ false);
+ if (!odev || odev->ifindex != fl4->flowi4_oif)
+ continue;
+
+ dst = &rt->dst;
+ break;
}
out_unlock:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3ee27b7704ff..d7eaa7354cf7 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -853,7 +853,7 @@ nomem:
/*
* Respond to a normal COOKIE ACK chunk.
- * We are the side that is being asked for an association.
+ * We are the side that is asking for an association.
*
* RFC 2960 5.1 Normal Establishment of an Association
*
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 9f2add3cba26..16c1c43980a1 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -810,7 +810,7 @@ static int switchdev_port_fdb_dump_cb(struct net_device *dev,
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ ndm->ndm_state = obj->u.fdb.ndm_state;
if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
goto nla_put_failure;
@@ -910,13 +910,9 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
if (switchdev_port_attr_get(dev, &attr))
return NULL;
- if (nhsel > 0) {
- if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
+ if (nhsel > 0 &&
+ !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
return NULL;
- if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
- attr.u.ppid.id_len))
- return NULL;
- }
prev_attr = attr;
}
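
Both call sites above lean on a new netdev_phys_item_id_same() helper in place of the open-coded id_len/memcmp pair; judging from the removed lines it is presumably equivalent to:

	static inline bool
	netdev_phys_item_id_same(struct netdev_phys_item_id *a,
				 struct netdev_phys_item_id *b)
	{
		return a->id_len == b->id_len &&
		       !memcmp(a->id, b->id, a->id_len);
	}
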
@@ -1043,3 +1039,106 @@ void switchdev_fib_ipv4_abort(struct fib_info *fi)
fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
+
+static bool switchdev_port_same_parent_id(struct net_device *a,
+ struct net_device *b)
+{
+ struct switchdev_attr a_attr = {
+ .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+ .flags = SWITCHDEV_F_NO_RECURSE,
+ };
+ struct switchdev_attr b_attr = {
+ .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+ .flags = SWITCHDEV_F_NO_RECURSE,
+ };
+
+ if (switchdev_port_attr_get(a, &a_attr) ||
+ switchdev_port_attr_get(b, &b_attr))
+ return false;
+
+ return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+}
+
+static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
+ struct net_device *group_dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+ if (lower_dev == dev)
+ continue;
+ if (switchdev_port_same_parent_id(dev, lower_dev))
+ return lower_dev->offload_fwd_mark;
+ return switchdev_port_fwd_mark_get(dev, lower_dev);
+ }
+
+ return dev->ifindex;
+}
+
+static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
+ u32 old_mark, u32 *reset_mark)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+ if (lower_dev->offload_fwd_mark == old_mark) {
+ if (!*reset_mark)
+ *reset_mark = lower_dev->ifindex;
+ lower_dev->offload_fwd_mark = *reset_mark;
+ }
+ switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
+ }
+}
+
+/**
+ * switchdev_port_fwd_mark_set - Set port offload forwarding mark
+ *
+ * @dev: port device
+ * @group_dev: containing device
+ * @joining: true if dev is joining group; false if leaving group
+ *
+ * An ungrouped port's offload mark is just its ifindex. A grouped
+ * port's (member of a bridge, for example) offload mark is the ifindex
+ * of one of the ports in the group with the same parent (switch) ID.
+ * Ports on the same device in the same group will have the same mark.
+ *
+ * Example:
+ *
+ * br0 ifindex=9
+ * sw1p1 ifindex=2 mark=2
+ * sw1p2 ifindex=3 mark=2
+ * sw2p1 ifindex=4 mark=5
+ * sw2p2 ifindex=5 mark=5
+ *
+ * If sw2p2 leaves the bridge, we'll have:
+ *
+ * br0 ifindex=9
+ * sw1p1 ifindex=2 mark=2
+ * sw1p2 ifindex=3 mark=2
+ * sw2p1 ifindex=4 mark=4
+ * sw2p2 ifindex=5 mark=5
+ */
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining)
+{
+ u32 mark = dev->ifindex;
+ u32 reset_mark = 0;
+
+ if (group_dev && joining) {
+ mark = switchdev_port_fwd_mark_get(dev, group_dev);
+ } else if (group_dev && !joining) {
+ if (dev->offload_fwd_mark == mark)
+ /* Ohoh, this port was the mark reference port,
+ * but it's leaving the group, so reset the
+ * mark for the remaining ports in the group.
+ */
+ switchdev_port_fwd_mark_reset(group_dev, mark,
+ &reset_mark);
+ }
+
+ dev->offload_fwd_mark = mark;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
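
A caller is expected to invoke the new export from its bridge join/leave paths; a hypothetical driver hook (the foo_* names are invented, switchdev_port_fwd_mark_set() is from this patch) would look like:

	static int foo_port_bridge_join(struct net_device *dev,
					struct net_device *br)
	{
		switchdev_port_fwd_mark_set(dev, br, true);	/* joining */
		return 0;
	}

	static void foo_port_bridge_leave(struct net_device *dev,
					  struct net_device *br)
	{
		switchdev_port_fwd_mark_set(dev, br, false);	/* leaving */
	}

On leave the mark falls back to the port's own ifindex, matching the sw2p1/sw2p2 example in the kernel-doc above.
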
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a816382fc8af..8b010c976b2f 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -316,6 +316,29 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
}
}
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
+{
+ u16 last = msg_last_bcast(hdr);
+ int mtyp = msg_type(hdr);
+
+ if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
+ return;
+ if (mtyp == STATE_MSG) {
+ tipc_bclink_update_link_state(n, last);
+ return;
+ }
+ /* Compatibility: older nodes don't support BCAST_PROTOCOL synchronization,
+ * and transfer synch info in LINK_PROTOCOL messages.
+ */
+ if (tipc_node_is_up(n))
+ return;
+ if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
+ return;
+ n->bclink.last_sent = last;
+ n->bclink.last_in = last;
+ n->bclink.oos_state = 0;
+}
+
/**
* bclink_peek_nack - monitor retransmission requests sent by other nodes
*
@@ -358,10 +381,9 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
/* Prepare clone of message for local node */
skb = tipc_msg_reassemble(list);
- if (unlikely(!skb)) {
- __skb_queue_purge(list);
+ if (unlikely(!skb))
return -EHOSTUNREACH;
- }
+
/* Broadcast to all nodes */
if (likely(bclink)) {
tipc_bclink_lock(net);
@@ -413,7 +435,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
* all nodes in the cluster don't ACK at the same time
*/
if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
- tipc_link_proto_xmit(node->active_links[node->addr & 1],
+ tipc_link_proto_xmit(node_active_link(node, node->addr),
STATE_MSG, 0, 0, 0, 0);
tn->bcl->stats.sent_acks++;
}
@@ -925,7 +947,6 @@ int tipc_bclink_init(struct net *net)
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
bcl->bearer_id = MAX_BEARERS;
rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
- bcl->state = WORKING_WORKING;
bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
msg_set_prevnode(bcl->pmsg, tn->own_addr);
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 3c290a48f720..d74c69bcf60b 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -133,5 +133,6 @@ void tipc_bclink_wakeup_users(struct net *net);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
void tipc_bclink_input(struct net *net);
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 00bc0e620532..ce9f7bfc0b92 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -343,7 +343,7 @@ restart:
static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
{
pr_info("Resetting bearer <%s>\n", b_ptr->name);
- tipc_link_delete_list(net, b_ptr->identity);
+ tipc_node_delete_links(net, b_ptr->identity);
tipc_disc_reset(net, b_ptr);
return 0;
}
@@ -361,7 +361,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
pr_info("Disabling bearer <%s>\n", b_ptr->name);
b_ptr->media->disable_media(b_ptr);
- tipc_link_delete_list(net, b_ptr->identity);
+ tipc_node_delete_links(net, b_ptr->identity);
if (b_ptr->link_req)
tipc_disc_delete(b_ptr->link_req);
@@ -470,6 +470,32 @@ void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
rcu_read_unlock();
}
+/* tipc_bearer_xmit() - send buffer to destination over bearer
+ */
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr *dst)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bearer *b;
+ struct sk_buff *skb, *tmp;
+
+ if (skb_queue_empty(xmitq))
+ return;
+
+ rcu_read_lock();
+ b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ if (likely(b)) {
+ skb_queue_walk_safe(xmitq, skb, tmp) {
+ __skb_dequeue(xmitq);
+ b->media->send_msg(net, skb, b, dst);
+ /* Until we remove cloning in tipc_l2_send_msg(): */
+ kfree_skb(skb);
+ }
+ }
+ rcu_read_unlock();
+}
+
/**
* tipc_l2_rcv_msg - handle incoming TIPC message from an interface
* @buf: the received packet
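
The new tipc_bearer_xmit() is built for the pattern where link-level code fills a packet queue under the node lock and the caller drains it to the bearer afterwards. A hypothetical call site (foo_node_timeout() is invented; the two TIPC calls are introduced by this patch):

	static void foo_node_timeout(struct net *net, struct tipc_link *l)
	{
		struct sk_buff_head xmitq;

		__skb_queue_head_init(&xmitq);
		tipc_link_timeout(l, &xmitq);	/* may append protocol messages */
		tipc_bearer_xmit(net, l->bearer_id, &xmitq, l->media_addr);
	}
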
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index dc714d977768..6426f242f626 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -217,5 +217,8 @@ void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
struct tipc_media_addr *dest);
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr *dst);
#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 0fcf133d5cb7..b96b41eabf12 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -109,6 +109,11 @@ struct tipc_net {
atomic_t subscription_count;
};
+static inline struct tipc_net *tipc_net(struct net *net)
+{
+ return net_generic(net, tipc_net_id);
+}
+
static inline u16 mod(u16 x)
{
return x & 0xffffu;
@@ -129,6 +134,11 @@ static inline int less(u16 left, u16 right)
return less_eq(left, right) && (mod(right) != mod(left));
}
+static inline int in_range(u16 val, u16 min, u16 max)
+{
+ return !less(val, min) && !more(val, max);
+}
+
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
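
The new in_range() composes the existing mod-2^16 helpers, so it stays correct when a sequence window wraps past 0xffff. A quick userspace check of that property (helpers re-declared here for illustration):

	#include <assert.h>
	#include <stdint.h>

	static int less_eq(uint16_t l, uint16_t r) { return (uint16_t)(r - l) < 32768u; }
	static int less(uint16_t l, uint16_t r)    { return less_eq(l, r) && l != r; }
	static int more(uint16_t l, uint16_t r)    { return less(r, l); }
	static int in_range(uint16_t v, uint16_t min, uint16_t max)
	{
		return !less(v, min) && !more(v, max);
	}

	int main(void)
	{
		assert(less(65530, 5));			/* 65530 precedes 5 after wrap */
		assert(in_range(2, 65530, 10));		/* window spans the wrap */
		assert(!in_range(20, 65530, 10));
		return 0;
	}
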
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 967e292f53c8..d14e0a4aa9af 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -35,7 +35,7 @@
*/
#include "core.h"
-#include "link.h"
+#include "node.h"
#include "discover.h"
/* min delay during bearer start up */
@@ -120,30 +120,24 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
* @buf: buffer containing message
* @bearer: bearer that message arrived on
*/
-void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
struct tipc_bearer *bearer)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_node *node;
- struct tipc_link *link;
struct tipc_media_addr maddr;
- struct sk_buff *rbuf;
- struct tipc_msg *msg = buf_msg(buf);
- u32 ddom = msg_dest_domain(msg);
- u32 onode = msg_prevnode(msg);
- u32 net_id = msg_bc_netid(msg);
- u32 mtyp = msg_type(msg);
- u32 signature = msg_node_sig(msg);
- u16 caps = msg_node_capabilities(msg);
- bool addr_match = false;
- bool sign_match = false;
- bool link_up = false;
- bool accept_addr = false;
- bool accept_sign = false;
+ struct sk_buff *rskb;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 ddom = msg_dest_domain(hdr);
+ u32 onode = msg_prevnode(hdr);
+ u32 net_id = msg_bc_netid(hdr);
+ u32 mtyp = msg_type(hdr);
+ u32 signature = msg_node_sig(hdr);
+ u16 caps = msg_node_capabilities(hdr);
bool respond = false;
+ bool dupl_addr = false;
- bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
- kfree_skb(buf);
+ bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
+ kfree_skb(skb);
/* Ensure message from node is valid and communication is permitted */
if (net_id != tn->net_id)
@@ -165,102 +159,20 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
if (!tipc_in_scope(bearer->domain, onode))
return;
- node = tipc_node_create(net, onode);
- if (!node)
- return;
- tipc_node_lock(node);
- node->capabilities = caps;
- link = node->links[bearer->identity];
-
- /* Prepare to validate requesting node's signature and media address */
- sign_match = (signature == node->signature);
- addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
- link_up = link && tipc_link_is_up(link);
-
-
- /* These three flags give us eight permutations: */
-
- if (sign_match && addr_match && link_up) {
- /* All is fine. Do nothing. */
- } else if (sign_match && addr_match && !link_up) {
- /* Respond. The link will come up in due time */
- respond = true;
- } else if (sign_match && !addr_match && link_up) {
- /* Peer has changed i/f address without rebooting.
- * If so, the link will reset soon, and the next
- * discovery will be accepted. So we can ignore it.
- * It may also be a cloned or malicious peer having
- * chosen the same node address and signature as an
- * existing one.
- * Ignore requests until the link goes down, if ever.
- */
- disc_dupl_alert(bearer, onode, &maddr);
- } else if (sign_match && !addr_match && !link_up) {
- /* Peer link has changed i/f address without rebooting.
- * It may also be a cloned or malicious peer; we can't
- * distinguish between the two.
- * The signature is correct, so we must accept.
- */
- accept_addr = true;
- respond = true;
- } else if (!sign_match && addr_match && link_up) {
- /* Peer node rebooted. Two possibilities:
- * - Delayed re-discovery; this link endpoint has already
- * reset and re-established contact with the peer, before
- * receiving a discovery message from that node.
- * (The peer happened to receive one from this node first).
- * - The peer came back so fast that our side has not
- * discovered it yet. Probing from this side will soon
- * reset the link, since there can be no working link
- * endpoint at the peer end, and the link will re-establish.
- * Accept the signature, since it comes from a known peer.
- */
- accept_sign = true;
- } else if (!sign_match && addr_match && !link_up) {
- /* The peer node has rebooted.
- * Accept signature, since it is a known peer.
- */
- accept_sign = true;
- respond = true;
- } else if (!sign_match && !addr_match && link_up) {
- /* Peer rebooted with new address, or a new/duplicate peer.
- * Ignore until the link goes down, if ever.
- */
+ tipc_node_check_dest(net, onode, bearer, caps, signature,
+ &maddr, &respond, &dupl_addr);
+ if (dupl_addr)
disc_dupl_alert(bearer, onode, &maddr);
- } else if (!sign_match && !addr_match && !link_up) {
- /* Peer rebooted with new address, or it is a new peer.
- * Accept signature and address.
- */
- accept_sign = true;
- accept_addr = true;
- respond = true;
- }
-
- if (accept_sign)
- node->signature = signature;
-
- if (accept_addr) {
- if (!link)
- link = tipc_link_create(node, bearer, &maddr);
- if (link) {
- memcpy(&link->media_addr, &maddr, sizeof(maddr));
- tipc_link_reset(link);
- } else {
- respond = false;
- }
- }
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
- rbuf = tipc_buf_acquire(MAX_H_SIZE);
- if (rbuf) {
- tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
- tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
- kfree_skb(rbuf);
+ rskb = tipc_buf_acquire(MAX_H_SIZE);
+ if (rskb) {
+ tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+ tipc_bearer_send(net, bearer->identity, rskb, &maddr);
+ kfree_skb(rskb);
}
}
- tipc_node_unlock(node);
- tipc_node_put(node);
}
/**
diff --git a/net/tipc/link.c b/net/tipc/link.c
index eaa9fe54b4ae..f067e5425560 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -48,9 +48,8 @@
/*
* Error message prefixes
*/
-static const char *link_co_err = "Link changeover error, ";
+static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
-static const char *link_unk_evt = "Unknown link event ";
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
@@ -77,256 +76,413 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
};
/*
+ * Interval between NACKs when packets arrive out of order
+ */
+#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
+/*
* Out-of-range value for link session numbers
*/
-#define INVALID_SESSION 0x10000
+#define WILDCARD_SESSION 0x10000
-/*
- * Link state events:
+/* Link FSM states:
*/
-#define STARTING_EVT 856384768 /* link processing trigger */
-#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
-#define SILENCE_EVT 560817u /* timer discovered silence from peer */
+enum {
+ LINK_ESTABLISHED = 0xe,
+ LINK_ESTABLISHING = 0xe << 4,
+ LINK_RESET = 0x1 << 8,
+ LINK_RESETTING = 0x2 << 12,
+ LINK_PEER_RESET = 0xd << 16,
+ LINK_FAILINGOVER = 0xf << 20,
+ LINK_SYNCHING = 0xc << 24
+};
-/*
- * State value stored in 'failover_pkts'
+/* Link FSM state checking routines
*/
-#define FIRST_FAILOVER 0xffffu
-
-static void link_handle_out_of_seq_msg(struct tipc_link *link,
- struct sk_buff *skb);
-static void tipc_link_proto_rcv(struct tipc_link *link,
- struct sk_buff *skb);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
-static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static int link_is_up(struct tipc_link *l)
+{
+ return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
+}
+
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+ u16 rcvgap, int tolerance, int priority,
+ struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
-static void link_set_timer(struct tipc_link *link, unsigned long time);
+
/*
- * Simple link routines
+ * Simple non-static link routines (i.e. referenced outside this file)
*/
-static unsigned int align(unsigned int i)
+bool tipc_link_is_up(struct tipc_link *l)
{
- return (i + 3) & ~3u;
+ return link_is_up(l);
}
-static void tipc_link_release(struct kref *kref)
+bool tipc_link_is_reset(struct tipc_link *l)
{
- kfree(container_of(kref, struct tipc_link, ref));
+ return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}
-static void tipc_link_get(struct tipc_link *l_ptr)
+bool tipc_link_is_synching(struct tipc_link *l)
{
- kref_get(&l_ptr->ref);
+ return l->state == LINK_SYNCHING;
}
-static void tipc_link_put(struct tipc_link *l_ptr)
+bool tipc_link_is_failingover(struct tipc_link *l)
{
- kref_put(&l_ptr->ref, tipc_link_release);
+ return l->state == LINK_FAILINGOVER;
}
-static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
+bool tipc_link_is_blocked(struct tipc_link *l)
{
- if (l->owner->active_links[0] != l)
- return l->owner->active_links[0];
- return l->owner->active_links[1];
+ return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}
-/*
- * Simple non-static link routines (i.e. referenced outside this file)
- */
-int tipc_link_is_up(struct tipc_link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l)
{
- if (!l_ptr)
- return 0;
- return link_working_working(l_ptr) || link_working_unknown(l_ptr);
+ struct tipc_node *n = l->owner;
+
+ return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}
-int tipc_link_is_active(struct tipc_link *l_ptr)
+static u32 link_own_addr(struct tipc_link *l)
{
- return (l_ptr->owner->active_links[0] == l_ptr) ||
- (l_ptr->owner->active_links[1] == l_ptr);
+ return msg_prevnode(l->pmsg);
}
/**
- * link_timeout - handle expiration of link timer
- * @l_ptr: pointer to link
+ * tipc_link_create - create a new link
+ * @n: pointer to associated node
+ * @b: pointer to associated bearer
+ * @ownnode: identity of own node
+ * @peer: identity of peer node
+ * @maddr: media address to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
+ *
+ * Returns true if link was created, otherwise false
*/
-static void link_timeout(unsigned long data)
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+ u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+ struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+ struct tipc_link **link)
{
- struct tipc_link *l_ptr = (struct tipc_link *)data;
- struct sk_buff *skb;
+ struct tipc_link *l;
+ struct tipc_msg *hdr;
+ char *if_name;
+
+ l = kzalloc(sizeof(*l), GFP_ATOMIC);
+ if (!l)
+ return false;
+ *link = l;
+
+ /* Note: peer i/f name is completed by reset/activate message */
+ if_name = strchr(b->name, ':') + 1;
+ sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+ tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+ if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+
+ l->addr = peer;
+ l->media_addr = maddr;
+ l->owner = n;
+ l->peer_session = WILDCARD_SESSION;
+ l->bearer_id = b->identity;
+ l->tolerance = b->tolerance;
+ l->net_plane = b->net_plane;
+ l->advertised_mtu = b->mtu;
+ l->mtu = b->mtu;
+ l->priority = b->priority;
+ tipc_link_set_queue_limits(l, b->window);
+ l->inputq = inputq;
+ l->namedq = namedq;
+ l->state = LINK_RESETTING;
+ l->pmsg = (struct tipc_msg *)&l->proto_msg;
+ hdr = l->pmsg;
+ tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
+ msg_set_size(hdr, sizeof(l->proto_msg));
+ msg_set_session(hdr, session);
+ msg_set_bearer_id(hdr, l->bearer_id);
+ strcpy((char *)msg_data(hdr), if_name);
+ __skb_queue_head_init(&l->transmq);
+ __skb_queue_head_init(&l->backlogq);
+ __skb_queue_head_init(&l->deferdq);
+ skb_queue_head_init(&l->wakeupq);
+ skb_queue_head_init(l->inputq);
+ return true;
+}
- tipc_node_lock(l_ptr->owner);
+/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ u16 last_sent;
- /* update counters used in statistical profiling of send traffic */
- l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
- l_ptr->stats.queue_sz_counts++;
+ skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+ 0, l->addr, link_own_addr(l), 0, 0, 0);
+ if (!skb)
+ return;
+ last_sent = tipc_bclink_get_last_sent(l->owner->net);
+ msg_set_last_bcast(buf_msg(skb), last_sent);
+ __skb_queue_head_init(&list);
+ __skb_queue_tail(&list, skb);
+ tipc_link_xmit(l, &list, xmitq);
+}
- skb = skb_peek(&l_ptr->transmq);
- if (skb) {
- struct tipc_msg *msg = buf_msg(skb);
- u32 length = msg_size(msg);
+/**
+ * tipc_link_fsm_evt - link finite state machine
+ * @l: pointer to link
+ * @evt: state machine event to be processed
+ */
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
+{
+ int rc = 0;
- if ((msg_user(msg) == MSG_FRAGMENTER) &&
- (msg_type(msg) == FIRST_FRAGMENT)) {
- length = msg_size(msg_get_wrapped(msg));
+ switch (l->state) {
+ case LINK_RESETTING:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_FAILURE_EVT:
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILOVER_END_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
}
- if (length) {
- l_ptr->stats.msg_lengths_total += length;
- l_ptr->stats.msg_length_counts++;
- if (length <= 64)
- l_ptr->stats.msg_length_profile[0]++;
- else if (length <= 256)
- l_ptr->stats.msg_length_profile[1]++;
- else if (length <= 1024)
- l_ptr->stats.msg_length_profile[2]++;
- else if (length <= 4096)
- l_ptr->stats.msg_length_profile[3]++;
- else if (length <= 16384)
- l_ptr->stats.msg_length_profile[4]++;
- else if (length <= 32768)
- l_ptr->stats.msg_length_profile[5]++;
- else
- l_ptr->stats.msg_length_profile[6]++;
+ break;
+ case LINK_RESET:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_ESTABLISHING;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ l->state = LINK_FAILINGOVER;
+ case LINK_FAILURE_EVT:
+ case LINK_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILOVER_END_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_PEER_RESET:
+ switch (evt) {
+ case LINK_RESET_EVT:
+ l->state = LINK_ESTABLISHING;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILURE_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
}
+ break;
+ case LINK_FAILINGOVER:
+ switch (evt) {
+ case LINK_FAILOVER_END_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILURE_EVT:
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_ESTABLISHING:
+ switch (evt) {
+ case LINK_ESTABLISH_EVT:
+ l->state = LINK_ESTABLISHED;
+ rc |= TIPC_LINK_UP_EVT;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ l->state = LINK_FAILINGOVER;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_RESET_EVT:
+ case LINK_FAILURE_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ break;
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_ESTABLISHED:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_FAILURE_EVT:
+ l->state = LINK_RESETTING;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_ESTABLISH_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ l->state = LINK_SYNCHING;
+ break;
+ case LINK_SYNCH_END_EVT:
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_SYNCHING:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_FAILURE_EVT:
+ l->state = LINK_RESETTING;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_ESTABLISH_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ break;
+ case LINK_SYNCH_END_EVT:
+ l->state = LINK_ESTABLISHED;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ default:
+ pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
}
-
- /* do all other link processing performed on a periodic basis */
- if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
- link_state_event(l_ptr, SILENCE_EVT);
- l_ptr->silent_intv_cnt++;
- if (skb_queue_len(&l_ptr->backlogq))
- tipc_link_push_packets(l_ptr);
- link_set_timer(l_ptr, l_ptr->keepalive_intv);
- tipc_node_unlock(l_ptr->owner);
- tipc_link_put(l_ptr);
-}
-
-static void link_set_timer(struct tipc_link *link, unsigned long time)
-{
- if (!mod_timer(&link->timer, jiffies + time))
- tipc_link_get(link);
+ return rc;
+illegal_evt:
+ pr_err("Illegal FSM event %x in state %x on link %s\n",
+ evt, l->state, l->name);
+ return rc;
}
-/**
- * tipc_link_create - create a new link
- * @n_ptr: pointer to associated node
- * @b_ptr: pointer to associated bearer
- * @media_addr: media address to use when sending messages over link
- *
- * Returns pointer to link.
+/* link_profile_stats - update statistical profiling of traffic
*/
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
- struct tipc_bearer *b_ptr,
- const struct tipc_media_addr *media_addr)
+static void link_profile_stats(struct tipc_link *l)
{
- struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
- struct tipc_link *l_ptr;
+ struct sk_buff *skb;
struct tipc_msg *msg;
- char *if_name;
- char addr_string[16];
- u32 peer = n_ptr->addr;
+ int length;
- if (n_ptr->link_cnt >= MAX_BEARERS) {
- tipc_addr_string_fill(addr_string, n_ptr->addr);
- pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
- n_ptr->link_cnt, addr_string, MAX_BEARERS);
- return NULL;
- }
+ /* Update counters used in statistical profiling of send traffic */
+ l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
+ l->stats.queue_sz_counts++;
- if (n_ptr->links[b_ptr->identity]) {
- tipc_addr_string_fill(addr_string, n_ptr->addr);
- pr_err("Attempt to establish second link on <%s> to %s\n",
- b_ptr->name, addr_string);
- return NULL;
- }
+ skb = skb_peek(&l->transmq);
+ if (!skb)
+ return;
+ msg = buf_msg(skb);
+ length = msg_size(msg);
- l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
- if (!l_ptr) {
- pr_warn("Link creation failed, no memory\n");
- return NULL;
+ if (msg_user(msg) == MSG_FRAGMENTER) {
+ if (msg_type(msg) != FIRST_FRAGMENT)
+ return;
+ length = msg_size(msg_get_wrapped(msg));
}
- kref_init(&l_ptr->ref);
- l_ptr->addr = peer;
- if_name = strchr(b_ptr->name, ':') + 1;
- sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
- tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
- tipc_node(tn->own_addr),
- if_name,
- tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
- /* note: peer i/f name is updated by reset/activate message */
- memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
- l_ptr->owner = n_ptr;
- l_ptr->peer_session = INVALID_SESSION;
- l_ptr->bearer_id = b_ptr->identity;
- link_set_supervision_props(l_ptr, b_ptr->tolerance);
- l_ptr->state = RESET_UNKNOWN;
-
- l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
- msg = l_ptr->pmsg;
- tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
- l_ptr->addr);
- msg_set_size(msg, sizeof(l_ptr->proto_msg));
- msg_set_session(msg, (tn->random & 0xffff));
- msg_set_bearer_id(msg, b_ptr->identity);
- strcpy((char *)msg_data(msg), if_name);
- l_ptr->net_plane = b_ptr->net_plane;
- l_ptr->advertised_mtu = b_ptr->mtu;
- l_ptr->mtu = l_ptr->advertised_mtu;
- l_ptr->priority = b_ptr->priority;
- tipc_link_set_queue_limits(l_ptr, b_ptr->window);
- l_ptr->snd_nxt = 1;
- __skb_queue_head_init(&l_ptr->transmq);
- __skb_queue_head_init(&l_ptr->backlogq);
- __skb_queue_head_init(&l_ptr->deferdq);
- skb_queue_head_init(&l_ptr->wakeupq);
- skb_queue_head_init(&l_ptr->inputq);
- skb_queue_head_init(&l_ptr->namedq);
- link_reset_statistics(l_ptr);
- tipc_node_attach_link(n_ptr, l_ptr);
- setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
- link_state_event(l_ptr, STARTING_EVT);
-
- return l_ptr;
+ l->stats.msg_lengths_total += length;
+ l->stats.msg_length_counts++;
+ if (length <= 64)
+ l->stats.msg_length_profile[0]++;
+ else if (length <= 256)
+ l->stats.msg_length_profile[1]++;
+ else if (length <= 1024)
+ l->stats.msg_length_profile[2]++;
+ else if (length <= 4096)
+ l->stats.msg_length_profile[3]++;
+ else if (length <= 16384)
+ l->stats.msg_length_profile[4]++;
+ else if (length <= 32768)
+ l->stats.msg_length_profile[5]++;
+ else
+ l->stats.msg_length_profile[6]++;
}
-/**
- * tipc_link_delete - Delete a link
- * @l: link to be deleted
+/* tipc_link_timeout - perform periodic task as instructed by the node timeout
*/
-void tipc_link_delete(struct tipc_link *l)
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
- tipc_link_reset(l);
- if (del_timer(&l->timer))
- tipc_link_put(l);
- l->flags |= LINK_STOPPED;
- /* Delete link now, or when timer is finished: */
- tipc_link_reset_fragments(l);
- tipc_node_detach_link(l->owner, l);
- tipc_link_put(l);
-}
+ int rc = 0;
+ int mtyp = STATE_MSG;
+ bool xmit = false;
+ bool prb = false;
+
+ link_profile_stats(l);
+
+ switch (l->state) {
+ case LINK_ESTABLISHED:
+ case LINK_SYNCHING:
+ if (!l->silent_intv_cnt) {
+ if (tipc_bclink_acks_missing(l->owner))
+ xmit = true;
+ } else if (l->silent_intv_cnt <= l->abort_limit) {
+ xmit = true;
+ prb = true;
+ } else {
+ rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+ l->silent_intv_cnt++;
+ break;
+ case LINK_RESET:
+ xmit = true;
+ mtyp = RESET_MSG;
+ break;
+ case LINK_ESTABLISHING:
+ xmit = true;
+ mtyp = ACTIVATE_MSG;
+ break;
+ case LINK_PEER_RESET:
+ case LINK_RESETTING:
+ case LINK_FAILINGOVER:
+ break;
+ default:
+ break;
+ }
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *link;
- struct tipc_node *node;
+ if (xmit)
+ tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
- rcu_read_lock();
- list_for_each_entry_rcu(node, &tn->node_list, list) {
- tipc_node_lock(node);
- link = node->links[bearer_id];
- if (link)
- tipc_link_delete(link);
- tipc_node_unlock(node);
- }
- rcu_read_unlock();
+ return rc;
}
/**
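
The state values introduced in the hunk above are deliberately non-overlapping bit patterns rather than a dense enum, so membership tests over sets of states compile to a single mask, as in link_is_up(), tipc_link_is_reset() and tipc_link_is_blocked(). A userspace illustration of that encoding:

	#include <assert.h>

	enum {
		LINK_ESTABLISHED  = 0xe,
		LINK_ESTABLISHING = 0xe << 4,
		LINK_RESET        = 0x1 << 8,
		LINK_RESETTING    = 0x2 << 12,
		LINK_PEER_RESET   = 0xd << 16,
		LINK_FAILINGOVER  = 0xf << 20,
		LINK_SYNCHING     = 0xc << 24
	};

	static int link_is_up(int state)
	{
		return state & (LINK_ESTABLISHED | LINK_SYNCHING);
	}

	int main(void)
	{
		assert(link_is_up(LINK_SYNCHING));
		assert(!link_is_up(LINK_RESET));
		return 0;
	}
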
@@ -334,7 +490,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
* @link: congested link
* @list: message that was attempted sent
* Create pseudo msg to send back to user when congestion abates
- * Only consumes message if there is an error
+ * Does not consume buffer list
*/
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
@@ -347,8 +503,7 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
/* This really cannot happen... */
if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
- tipc_link_reset(link);
- goto err;
+ return -ENOBUFS;
}
/* Non-blocking sender: */
if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
@@ -358,15 +513,12 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
addr, addr, oport, 0, 0);
if (!skb)
- goto err;
+ return -ENOBUFS;
TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
TIPC_SKB_CB(skb)->chain_imp = imp;
skb_queue_tail(&link->wakeupq, skb);
link->stats.link_congs++;
return -ELINKCONG;
-err:
- __skb_queue_purge(list);
- return -ENOBUFS;
}
/**
@@ -388,9 +540,7 @@ void link_prepare_wakeup(struct tipc_link *l)
if ((pnd[imp] + l->backlog[imp].len) >= lim)
break;
skb_unlink(skb, &l->wakeupq);
- skb_queue_tail(&l->inputq, skb);
- l->owner->inputq = &l->inputq;
- l->owner->action_flags |= TIPC_MSG_EVT;
+ skb_queue_tail(l->inputq, skb);
}
}
@@ -426,208 +576,36 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
tipc_link_reset_fragments(l_ptr);
}
-void tipc_link_reset(struct tipc_link *l_ptr)
+void tipc_link_reset(struct tipc_link *l)
{
- u32 prev_state = l_ptr->state;
- int was_active_link = tipc_link_is_active(l_ptr);
- struct tipc_node *owner = l_ptr->owner;
- struct tipc_link *pl = tipc_parallel_link(l_ptr);
-
- msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
/* Link is down, accept any session */
- l_ptr->peer_session = INVALID_SESSION;
-
- /* Prepare for renewed mtu size negotiation */
- l_ptr->mtu = l_ptr->advertised_mtu;
-
- l_ptr->state = RESET_UNKNOWN;
+ l->peer_session = WILDCARD_SESSION;
- if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
- return;
-
- tipc_node_link_down(l_ptr->owner, l_ptr);
- tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
+ /* If peer is up, it only accepts an incremented session number */
+ msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
- if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
- l_ptr->flags |= LINK_FAILINGOVER;
- l_ptr->failover_checkpt = l_ptr->rcv_nxt;
- pl->failover_pkts = FIRST_FAILOVER;
- pl->failover_checkpt = l_ptr->rcv_nxt;
- pl->failover_skb = l_ptr->reasm_buf;
- } else {
- kfree_skb(l_ptr->reasm_buf);
- }
- /* Clean up all queues, except inputq: */
- __skb_queue_purge(&l_ptr->transmq);
- __skb_queue_purge(&l_ptr->deferdq);
- if (!owner->inputq)
- owner->inputq = &l_ptr->inputq;
- skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
- if (!skb_queue_empty(owner->inputq))
- owner->action_flags |= TIPC_MSG_EVT;
- tipc_link_purge_backlog(l_ptr);
- l_ptr->reasm_buf = NULL;
- l_ptr->rcv_unacked = 0;
- l_ptr->snd_nxt = 1;
- l_ptr->silent_intv_cnt = 0;
- l_ptr->stale_count = 0;
- link_reset_statistics(l_ptr);
-}
-
-static void link_activate(struct tipc_link *link)
-{
- struct tipc_node *node = link->owner;
-
- link->rcv_nxt = 1;
- link->stats.recv_info = 1;
- link->silent_intv_cnt = 0;
- tipc_node_link_up(node, link);
- tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
-}
-
-/**
- * link_state_event - link finite state machine
- * @l_ptr: pointer to link
- * @event: state machine event to process
- */
-static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
-{
- struct tipc_link *other;
- unsigned long timer_intv = l_ptr->keepalive_intv;
-
- if (l_ptr->flags & LINK_STOPPED)
- return;
-
- if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
- return; /* Not yet. */
-
- if (l_ptr->flags & LINK_FAILINGOVER)
- return;
-
- switch (l_ptr->state) {
- case WORKING_WORKING:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- case ACTIVATE_MSG:
- l_ptr->silent_intv_cnt = 0;
- break;
- case SILENCE_EVT:
- if (!l_ptr->silent_intv_cnt) {
- if (tipc_bclink_acks_missing(l_ptr->owner))
- tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 0, 0, 0, 0);
- break;
- }
- l_ptr->state = WORKING_UNKNOWN;
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- break;
- case RESET_MSG:
- pr_debug("%s<%s>, requested by peer\n",
- link_rst_msg, l_ptr->name);
- tipc_link_reset(l_ptr);
- l_ptr->state = RESET_RESET;
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0);
- break;
- default:
- pr_debug("%s%u in WW state\n", link_unk_evt, event);
- }
- break;
- case WORKING_UNKNOWN:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- case ACTIVATE_MSG:
- l_ptr->state = WORKING_WORKING;
- l_ptr->silent_intv_cnt = 0;
- break;
- case RESET_MSG:
- pr_debug("%s<%s>, requested by peer while probing\n",
- link_rst_msg, l_ptr->name);
- tipc_link_reset(l_ptr);
- l_ptr->state = RESET_RESET;
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0);
- break;
- case SILENCE_EVT:
- if (!l_ptr->silent_intv_cnt) {
- l_ptr->state = WORKING_WORKING;
- if (tipc_bclink_acks_missing(l_ptr->owner))
- tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 0, 0, 0, 0);
- } else if (l_ptr->silent_intv_cnt <
- l_ptr->abort_limit) {
- tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 1, 0, 0, 0);
- } else { /* Link has failed */
- pr_debug("%s<%s>, peer not responding\n",
- link_rst_msg, l_ptr->name);
- tipc_link_reset(l_ptr);
- l_ptr->state = RESET_UNKNOWN;
- tipc_link_proto_xmit(l_ptr, RESET_MSG,
- 0, 0, 0, 0);
- }
- break;
- default:
- pr_err("%s%u in WU state\n", link_unk_evt, event);
- }
- break;
- case RESET_UNKNOWN:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- break;
- case ACTIVATE_MSG:
- other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other))
- break;
- l_ptr->state = WORKING_WORKING;
- link_activate(l_ptr);
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- if (l_ptr->owner->working_links == 1)
- tipc_link_sync_xmit(l_ptr);
- break;
- case RESET_MSG:
- l_ptr->state = RESET_RESET;
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 1, 0, 0, 0);
- break;
- case STARTING_EVT:
- l_ptr->flags |= LINK_STARTED;
- link_set_timer(l_ptr, timer_intv);
- break;
- case SILENCE_EVT:
- tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
- break;
- default:
- pr_err("%s%u in RU state\n", link_unk_evt, event);
- }
- break;
- case RESET_RESET:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- case ACTIVATE_MSG:
- other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other))
- break;
- l_ptr->state = WORKING_WORKING;
- link_activate(l_ptr);
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- if (l_ptr->owner->working_links == 1)
- tipc_link_sync_xmit(l_ptr);
- break;
- case RESET_MSG:
- break;
- case SILENCE_EVT:
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0);
- break;
- default:
- pr_err("%s%u in RR state\n", link_unk_evt, event);
- }
- break;
- default:
- pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
- }
+ /* Prepare for renewed mtu size negotiation */
+ l->mtu = l->advertised_mtu;
+
+ /* Clean up all queues: */
+ __skb_queue_purge(&l->transmq);
+ __skb_queue_purge(&l->deferdq);
+ skb_queue_splice_init(&l->wakeupq, l->inputq);
+
+ tipc_link_purge_backlog(l);
+ kfree_skb(l->reasm_buf);
+ kfree_skb(l->failover_reasm_skb);
+ l->reasm_buf = NULL;
+ l->failover_reasm_skb = NULL;
+ l->rcv_unacked = 0;
+ l->snd_nxt = 1;
+ l->rcv_nxt = 1;
+ l->silent_intv_cnt = 0;
+ l->stats.recv_info = 0;
+ l->stale_count = 0;
+ link_reset_statistics(l);
}
/**
@@ -635,8 +613,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
* @link: link to use
* @list: chain of buffers containing message
*
- * Consumes the buffer chain, except when returning -ELINKCONG,
- * since the caller then may want to make more send attempts.
+ * Consumes the buffer chain, except when returning an error code.
* Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
*/
@@ -650,7 +627,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
u16 ack = mod(link->rcv_nxt - 1);
u16 seqno = link->snd_nxt;
u16 bc_last_in = link->owner->bclink.last_in;
- struct tipc_media_addr *addr = &link->media_addr;
+ struct tipc_media_addr *addr = link->media_addr;
struct sk_buff_head *transmq = &link->transmq;
struct sk_buff_head *backlogq = &link->backlogq;
struct sk_buff *skb, *bskb;
@@ -660,10 +637,9 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
return link_schedule_user(link, list);
}
- if (unlikely(msg_size(msg) > mtu)) {
- __skb_queue_purge(list);
+ if (unlikely(msg_size(msg) > mtu))
return -EMSGSIZE;
- }
+
/* Prepare each packet for sending, and add to relevant queue: */
while (skb_queue_len(list)) {
skb = skb_peek(list);
@@ -700,101 +676,76 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
return 0;
}
-static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
-{
- skb_queue_head_init(list);
- __skb_queue_tail(list, skb);
-}
-
-static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
-{
- struct sk_buff_head head;
-
- skb2list(skb, &head);
- return __tipc_link_xmit(link->owner->net, link, &head);
-}
-
-/* tipc_link_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
- * messages, which will not be rejected
- * The only exception is datagram messages rerouted after secondary
- * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
- */
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
- u32 selector)
-{
- struct sk_buff_head head;
- int rc;
-
- skb2list(skb, &head);
- rc = tipc_link_xmit(net, &head, dnode, selector);
- if (rc == -ELINKCONG)
- kfree_skb(skb);
- return 0;
-}
-
/**
- * tipc_link_xmit() is the general link level function for message sending
- * @net: the applicable net namespace
+ * tipc_link_xmit(): enqueue buffer list according to queue situation
+ * @link: link to use
* @list: chain of buffers containing message
- * @dsz: amount of user data to be sent
- * @dnode: address of destination node
- * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ * @xmitq: returned list of packets to be sent by caller
+ *
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
*/
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
- u32 selector)
+int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ struct sk_buff_head *xmitq)
{
- struct tipc_link *link = NULL;
- struct tipc_node *node;
- int rc = -EHOSTUNREACH;
+ struct tipc_msg *hdr = buf_msg(skb_peek(list));
+ unsigned int maxwin = l->window;
+ unsigned int i, imp = msg_importance(hdr);
+ unsigned int mtu = l->mtu;
+ u16 ack = l->rcv_nxt - 1;
+ u16 seqno = l->snd_nxt;
+ u16 bc_last_in = l->owner->bclink.last_in;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff_head *backlogq = &l->backlogq;
+ struct sk_buff *skb, *_skb, *bskb;
- node = tipc_node_find(net, dnode);
- if (node) {
- tipc_node_lock(node);
- link = node->active_links[selector & 1];
- if (link)
- rc = __tipc_link_xmit(net, link, list);
- tipc_node_unlock(node);
- tipc_node_put(node);
- }
- if (link)
- return rc;
-
- if (likely(in_own_node(net, dnode))) {
- tipc_sk_rcv(net, list);
- return 0;
+ /* Match msg importance against this and all higher backlog limits: */
+ for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+ if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+ return link_schedule_user(l, list);
}
+ if (unlikely(msg_size(hdr) > mtu))
+ return -EMSGSIZE;
- __skb_queue_purge(list);
- return rc;
-}
-
-/*
- * tipc_link_sync_xmit - synchronize broadcast link endpoints.
- *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
- *
- * Called with node locked
- */
-static void tipc_link_sync_xmit(struct tipc_link *link)
-{
- struct sk_buff *skb;
- struct tipc_msg *msg;
-
- skb = tipc_buf_acquire(INT_H_SIZE);
- if (!skb)
- return;
+ /* Prepare each packet for sending, and add to relevant queue: */
+ while (skb_queue_len(list)) {
+ skb = skb_peek(list);
+ hdr = buf_msg(skb);
+ msg_set_seqno(hdr, seqno);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_last_in);
- msg = buf_msg(skb);
- tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
- INT_H_SIZE, link->addr);
- msg_set_last_bcast(msg, link->owner->bclink.acked);
- __tipc_link_xmit_skb(link, skb);
+ if (likely(skb_queue_len(transmq) < maxwin)) {
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb)
+ return -ENOBUFS;
+ __skb_dequeue(list);
+ __skb_queue_tail(transmq, skb);
+ __skb_queue_tail(xmitq, _skb);
+ l->rcv_unacked = 0;
+ seqno++;
+ continue;
+ }
+ if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+ kfree_skb(__skb_dequeue(list));
+ l->stats.sent_bundled++;
+ continue;
+ }
+ if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+ kfree_skb(__skb_dequeue(list));
+ __skb_queue_tail(backlogq, bskb);
+ l->backlog[msg_importance(buf_msg(bskb))].len++;
+ l->stats.sent_bundled++;
+ l->stats.sent_bundles++;
+ continue;
+ }
+ l->backlog[imp].len += skb_queue_len(list);
+ skb_queue_splice_tail_init(list, backlogq);
+ }
+ l->snd_nxt = seqno;
+ return 0;
}
/*
@@ -842,29 +793,37 @@ void tipc_link_push_packets(struct tipc_link *link)
link->rcv_unacked = 0;
__skb_queue_tail(&link->transmq, skb);
tipc_bearer_send(link->owner->net, link->bearer_id,
- skb, &link->media_addr);
+ skb, link->media_addr);
}
link->snd_nxt = seqno;
}
-void tipc_link_reset_all(struct tipc_node *node)
+void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
- char addr_string[16];
- u32 i;
-
- tipc_node_lock(node);
+ struct sk_buff *skb, *_skb;
+ struct tipc_msg *hdr;
+ u16 seqno = l->snd_nxt;
+ u16 ack = l->rcv_nxt - 1;
- pr_warn("Resetting all links to %s\n",
- tipc_addr_string_fill(addr_string, node->addr));
-
- for (i = 0; i < MAX_BEARERS; i++) {
- if (node->links[i]) {
- link_print(node->links[i], "Resetting link\n");
- tipc_link_reset(node->links[i]);
- }
+ while (skb_queue_len(&l->transmq) < l->window) {
+ skb = skb_peek(&l->backlogq);
+ if (!skb)
+ break;
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb)
+ break;
+ __skb_dequeue(&l->backlogq);
+ hdr = buf_msg(skb);
+ l->backlog[msg_importance(hdr)].len--;
+ __skb_queue_tail(&l->transmq, skb);
+ __skb_queue_tail(xmitq, _skb);
+ msg_set_ack(hdr, ack);
+ msg_set_seqno(hdr, seqno);
+ msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ l->rcv_unacked = 0;
+ seqno++;
}
-
- tipc_node_unlock(node);
+ l->snd_nxt = seqno;
}
static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -877,9 +836,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
if (l_ptr->addr) {
/* Handle failure on standard link */
- link_print(l_ptr, "Resetting link\n");
- tipc_link_reset(l_ptr);
-
+ link_print(l_ptr, "Resetting link ");
+ pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+ msg_user(msg), msg_type(msg), msg_size(msg),
+ msg_errcode(msg));
+ pr_info("sqno %u, prev: %x, src: %x\n",
+ msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
} else {
/* Handle failure on broadcast link */
struct tipc_node *n_ptr;
@@ -934,191 +896,45 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
- &l_ptr->media_addr);
+ l_ptr->media_addr);
retransmits--;
l_ptr->stats.retransmitted++;
}
}
-/* link_synch(): check if all packets arrived before the synch
- * point have been consumed
- * Returns true if the parallel links are synched, otherwise false
- */
-static bool link_synch(struct tipc_link *l)
+static int tipc_link_retransm(struct tipc_link *l, int retransm,
+ struct sk_buff_head *xmitq)
{
- unsigned int post_synch;
- struct tipc_link *pl;
+ struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
+ struct tipc_msg *hdr;
- pl = tipc_parallel_link(l);
- if (pl == l)
- goto synched;
-
- /* Was last pre-synch packet added to input queue ? */
- if (less_eq(pl->rcv_nxt, l->synch_point))
- return false;
-
- /* Is it still in the input queue ? */
- post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
- if (skb_queue_len(&pl->inputq) > post_synch)
- return false;
-synched:
- l->flags &= ~LINK_SYNCHING;
- return true;
-}
-
-static void link_retrieve_defq(struct tipc_link *link,
- struct sk_buff_head *list)
-{
- u16 seq_no;
-
- if (skb_queue_empty(&link->deferdq))
- return;
-
- seq_no = buf_seqno(skb_peek(&link->deferdq));
- if (seq_no == link->rcv_nxt)
- skb_queue_splice_tail_init(&link->deferdq, list);
-}
-
-/**
- * tipc_rcv - process TIPC packets/messages arriving from off-node
- * @net: the applicable net namespace
- * @skb: TIPC packet
- * @b_ptr: pointer to bearer message arrived on
- *
- * Invoked with no locks held. Bearer pointer must point to a valid bearer
- * structure (i.e. cannot be NULL), but bearer can be inactive.
- */
-void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sk_buff_head head;
- struct tipc_node *n_ptr;
- struct tipc_link *l_ptr;
- struct sk_buff *skb1, *tmp;
- struct tipc_msg *msg;
- u16 seq_no;
- u16 ackd;
- u32 released;
-
- skb2list(skb, &head);
-
- while ((skb = __skb_dequeue(&head))) {
- /* Ensure message is well-formed */
- if (unlikely(!tipc_msg_validate(skb)))
- goto discard;
-
- /* Handle arrival of a non-unicast link message */
- msg = buf_msg(skb);
- if (unlikely(msg_non_seq(msg))) {
- if (msg_user(msg) == LINK_CONFIG)
- tipc_disc_rcv(net, skb, b_ptr);
- else
- tipc_bclink_rcv(net, skb);
- continue;
- }
-
- /* Discard unicast link messages destined for another node */
- if (unlikely(!msg_short(msg) &&
- (msg_destnode(msg) != tn->own_addr)))
- goto discard;
-
- /* Locate neighboring node that sent message */
- n_ptr = tipc_node_find(net, msg_prevnode(msg));
- if (unlikely(!n_ptr))
- goto discard;
-
- tipc_node_lock(n_ptr);
- /* Locate unicast link endpoint that should handle message */
- l_ptr = n_ptr->links[b_ptr->identity];
- if (unlikely(!l_ptr))
- goto unlock;
-
- /* Verify that communication with node is currently allowed */
- if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
- msg_user(msg) == LINK_PROTOCOL &&
- (msg_type(msg) == RESET_MSG ||
- msg_type(msg) == ACTIVATE_MSG) &&
- !msg_redundant_link(msg))
- n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
-
- if (tipc_node_blocked(n_ptr))
- goto unlock;
-
- /* Validate message sequence number info */
- seq_no = msg_seqno(msg);
- ackd = msg_ack(msg);
-
- /* Release acked messages */
- if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
- tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-
- released = 0;
- skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
- if (more(buf_seqno(skb1), ackd))
- break;
- __skb_unlink(skb1, &l_ptr->transmq);
- kfree_skb(skb1);
- released = 1;
- }
-
- /* Try sending any messages link endpoint has pending */
- if (unlikely(skb_queue_len(&l_ptr->backlogq)))
- tipc_link_push_packets(l_ptr);
-
- if (released && !skb_queue_empty(&l_ptr->wakeupq))
- link_prepare_wakeup(l_ptr);
-
- /* Process the incoming packet */
- if (unlikely(!link_working_working(l_ptr))) {
- if (msg_user(msg) == LINK_PROTOCOL) {
- tipc_link_proto_rcv(l_ptr, skb);
- link_retrieve_defq(l_ptr, &head);
- skb = NULL;
- goto unlock;
- }
-
- /* Traffic message. Conditionally activate link */
- link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
- if (link_working_working(l_ptr)) {
- /* Re-insert buffer in front of queue */
- __skb_queue_head(&head, skb);
- skb = NULL;
- goto unlock;
- }
- goto unlock;
- }
-
- /* Link is now in state WORKING_WORKING */
- if (unlikely(seq_no != l_ptr->rcv_nxt)) {
- link_handle_out_of_seq_msg(l_ptr, skb);
- link_retrieve_defq(l_ptr, &head);
- skb = NULL;
- goto unlock;
- }
- l_ptr->silent_intv_cnt = 0;
+ if (!skb)
+ return 0;
- /* Synchronize with parallel link if applicable */
- if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
- if (!link_synch(l_ptr))
- goto unlock;
- }
- l_ptr->rcv_nxt++;
- if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
- link_retrieve_defq(l_ptr, &head);
- if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
- l_ptr->stats.sent_acks++;
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
- }
- tipc_link_input(l_ptr, skb);
- skb = NULL;
-unlock:
- tipc_node_unlock(n_ptr);
- tipc_node_put(n_ptr);
-discard:
- if (unlikely(skb))
- kfree_skb(skb);
+ /* Detect repeated retransmit failures on same packet */
+ if (likely(l->last_retransm != buf_seqno(skb))) {
+ l->last_retransm = buf_seqno(skb);
+ l->stale_count = 1;
+ } else if (++l->stale_count > 100) {
+ link_retransmit_failure(l, skb);
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+ skb_queue_walk(&l->transmq, skb) {
+ if (!retransm)
+ return 0;
+ hdr = buf_msg(skb);
+ _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+ if (!_skb)
+ return 0;
+ hdr = buf_msg(_skb);
+ msg_set_ack(hdr, l->rcv_nxt - 1);
+ msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ _skb->priority = TC_PRIO_CONTROL;
+ __skb_queue_tail(xmitq, _skb);
+ retransm--;
+ l->stats.retransmitted++;
}
+ return 0;
}
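/* Illustrative note, not part of the patch: the head of transmq is the
 * oldest unacked packet. If the same seqno heads more than 100 consecutive
 * retransmit requests, the peer is presumed unreachable and
 * LINK_FAILURE_EVT is fed to the FSM; the returned event bits then let the
 * caller take the link down, rather than the link resetting itself.
 */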
/* tipc_data_input - deliver data and name distr msgs to upper layer
@@ -1126,29 +942,22 @@ discard:
* Consumes buffer if message is of right type
* Node lock must be held
*/
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+ struct sk_buff_head *inputq)
{
struct tipc_node *node = link->owner;
- struct tipc_msg *msg = buf_msg(skb);
- u32 dport = msg_destport(msg);
- switch (msg_user(msg)) {
+ switch (msg_user(buf_msg(skb))) {
case TIPC_LOW_IMPORTANCE:
case TIPC_MEDIUM_IMPORTANCE:
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
case CONN_MANAGER:
- if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
- node->inputq = &link->inputq;
- node->action_flags |= TIPC_MSG_EVT;
- }
+ __skb_queue_tail(inputq, skb);
return true;
case NAME_DISTRIBUTOR:
node->bclink.recv_permitted = true;
- node->namedq = &link->namedq;
- skb_queue_tail(&link->namedq, skb);
- if (skb_queue_len(&link->namedq) == 1)
- node->action_flags |= TIPC_NAMED_MSG_EVT;
+ skb_queue_tail(link->namedq, skb);
return true;
case MSG_BUNDLER:
case TUNNEL_PROTOCOL:
@@ -1165,54 +974,160 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
/* tipc_link_input - process packet that has passed link protocol check
*
* Consumes buffer
- * Node lock must be held
*/
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *inputq)
{
- struct tipc_node *node = link->owner;
- struct tipc_msg *msg = buf_msg(skb);
+ struct tipc_node *node = l->owner;
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct sk_buff **reasm_skb = &l->reasm_buf;
struct sk_buff *iskb;
+ int usr = msg_user(hdr);
+ int rc = 0;
int pos = 0;
+ int ipos = 0;
- if (likely(tipc_data_input(link, skb)))
- return;
+ if (unlikely(usr == TUNNEL_PROTOCOL)) {
+ if (msg_type(hdr) == SYNCH_MSG) {
+ __skb_queue_purge(&l->deferdq);
+ goto drop;
+ }
+ if (!tipc_msg_extract(skb, &iskb, &ipos))
+ return rc;
+ kfree_skb(skb);
+ skb = iskb;
+ hdr = buf_msg(skb);
+ if (less(msg_seqno(hdr), l->drop_point))
+ goto drop;
+ if (tipc_data_input(l, skb, inputq))
+ return rc;
+ usr = msg_user(hdr);
+ reasm_skb = &l->failover_reasm_skb;
+ }
- switch (msg_user(msg)) {
- case TUNNEL_PROTOCOL:
- if (msg_dup(msg)) {
- link->flags |= LINK_SYNCHING;
- link->synch_point = msg_seqno(msg_get_wrapped(msg));
- kfree_skb(skb);
- break;
+ if (usr == MSG_BUNDLER) {
+ l->stats.recv_bundles++;
+ l->stats.recv_bundled += msg_msgcnt(hdr);
+ while (tipc_msg_extract(skb, &iskb, &pos))
+ tipc_data_input(l, iskb, inputq);
+ return 0;
+ } else if (usr == MSG_FRAGMENTER) {
+ l->stats.recv_fragments++;
+ if (tipc_buf_append(reasm_skb, &skb)) {
+ l->stats.recv_fragmented++;
+ tipc_data_input(l, skb, inputq);
+ } else if (!*reasm_skb) {
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
- if (!tipc_link_failover_rcv(link, &skb))
- break;
- if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
- tipc_data_input(link, skb);
+ return 0;
+ } else if (usr == BCAST_PROTOCOL) {
+ tipc_link_sync_rcv(node, skb);
+ return 0;
+ }
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
+{
+ bool released = false;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&l->transmq, skb, tmp) {
+ if (more(buf_seqno(skb), acked))
break;
+ __skb_unlink(skb, &l->transmq);
+ kfree_skb(skb);
+ released = true;
+ }
+ return released;
+}
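/* Illustrative note, not part of the patch: more() compares 16-bit seqnos
 * modulo 2^16, so release works across wraparound. With transmq holding
 * seqnos 65534, 65535, 0, 1 and acked == 0, the walk frees 65534, 65535
 * and 0, then stops at 1 because more(1, 0) is true.
 */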
+
+/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
+ * @l: the link that should handle the message
+ * @skb: TIPC packet
+ * @xmitq: queue to place packets to be sent after this call
+ */
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head *arrvq = &l->deferdq;
+ struct sk_buff_head tmpq;
+ struct tipc_msg *hdr;
+ u16 seqno, rcv_nxt;
+ int rc = 0;
+
+ __skb_queue_head_init(&tmpq);
+
+ if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
+ if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
+ tipc_link_build_proto_msg(l, STATE_MSG, 0,
+ 0, 0, 0, xmitq);
+ return rc;
+ }
+
+ while ((skb = skb_peek(arrvq))) {
+ hdr = buf_msg(skb);
+
+ /* Verify and update link state */
+ if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
+ __skb_dequeue(arrvq);
+ rc = tipc_link_proto_rcv(l, skb, xmitq);
+ continue;
}
- case MSG_BUNDLER:
- link->stats.recv_bundles++;
- link->stats.recv_bundled += msg_msgcnt(msg);
- while (tipc_msg_extract(skb, &iskb, &pos))
- tipc_data_input(link, iskb);
- break;
- case MSG_FRAGMENTER:
- link->stats.recv_fragments++;
- if (tipc_buf_append(&link->reasm_buf, &skb)) {
- link->stats.recv_fragmented++;
- tipc_data_input(link, skb);
- } else if (!link->reasm_buf) {
- tipc_link_reset(link);
+ if (unlikely(!link_is_up(l))) {
+ rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+ if (!link_is_up(l)) {
+ kfree_skb(__skb_dequeue(arrvq));
+ goto exit;
+ }
}
- break;
- case BCAST_PROTOCOL:
- tipc_link_sync_rcv(node, skb);
- break;
- default:
- break;
- };
+
+ l->silent_intv_cnt = 0;
+
+ /* Forward queues and wake up waiting users */
+ if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
+ }
+
+ /* Defer reception if there is a gap in the sequence */
+ seqno = msg_seqno(hdr);
+ rcv_nxt = l->rcv_nxt;
+ if (unlikely(less(rcv_nxt, seqno))) {
+ l->stats.deferred_recv++;
+ goto exit;
+ }
+
+ __skb_dequeue(arrvq);
+
+ /* Drop if packet already received */
+ if (unlikely(more(rcv_nxt, seqno))) {
+ l->stats.duplicates++;
+ kfree_skb(skb);
+ goto exit;
+ }
+
+ /* Packet can be delivered */
+ l->rcv_nxt++;
+ l->stats.recv_info++;
+ if (unlikely(!tipc_data_input(l, skb, &tmpq)))
+ rc = tipc_link_input(l, skb, &tmpq);
+
+ /* Ack at regular intervals */
+ if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
+ l->rcv_unacked = 0;
+ l->stats.sent_acks++;
+ tipc_link_build_proto_msg(l, STATE_MSG,
+ 0, 0, 0, 0, xmitq);
+ }
+ }
+exit:
+ tipc_skb_queue_splice_tail(&tmpq, l->inputq);
+ return rc;
}
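/* Illustrative sketch, not part of the patch: the receive path mirrors the
 * send path. A caller (hypothetical name, modeled on the tipc_node_timeout
 * pattern above) feeds one buffer in, then flushes whatever protocol or
 * retransmit traffic the link queued on xmitq:
 */
static void example_node_rcv(struct tipc_node *n, struct sk_buff *skb,
			     int bearer_id)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);
	tipc_node_lock(n);
	rc = tipc_link_rcv(le->link, skb, &xmitq);
	tipc_node_unlock(n);
	tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_link_down(n, bearer_id, false);
}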
/**
@@ -1255,458 +1170,249 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
}
/*
- * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
+ * Send protocol message to the other endpoint.
*/
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
- struct sk_buff *buf)
+void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
+ u32 gap, u32 tolerance, u32 priority)
{
- u32 seq_no = buf_seqno(buf);
+ struct sk_buff *skb = NULL;
+ struct sk_buff_head xmitq;
- if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
- tipc_link_proto_rcv(l_ptr, buf);
+ __skb_queue_head_init(&xmitq);
+ tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
+ tolerance, priority, &xmitq);
+ skb = __skb_dequeue(&xmitq);
+ if (!skb)
return;
- }
-
- /* Record OOS packet arrival */
- l_ptr->silent_intv_cnt = 0;
+ tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+ l->rcv_unacked = 0;
+ kfree_skb(skb);
+}
- /*
- * Discard packet if a duplicate; otherwise add it to deferred queue
- * and notify peer of gap as per protocol specification
- */
- if (less(seq_no, l_ptr->rcv_nxt)) {
- l_ptr->stats.duplicates++;
- kfree_skb(buf);
+/* tipc_link_build_proto_msg: prepare link protocol message for transmission
+ */
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+ u16 rcvgap, int tolerance, int priority,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb = NULL;
+ struct tipc_msg *hdr = l->pmsg;
+ u16 snd_nxt = l->snd_nxt;
+ u16 rcv_nxt = l->rcv_nxt;
+ u16 rcv_last = rcv_nxt - 1;
+ int node_up = l->owner->bclink.recv_permitted;
+
+ /* Don't send protocol message during reset or link failover */
+ if (tipc_link_is_blocked(l))
return;
- }
- if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
- l_ptr->stats.deferred_recv++;
- if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
+ msg_set_type(hdr, mtyp);
+ msg_set_net_plane(hdr, l->net_plane);
+ msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+ msg_set_link_tolerance(hdr, tolerance);
+ msg_set_linkprio(hdr, priority);
+ msg_set_redundant_link(hdr, node_up);
+ msg_set_seq_gap(hdr, 0);
+
+ /* Compatibility: created msg must not be in sequence with pkt flow */
+ msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+
+ if (mtyp == STATE_MSG) {
+ if (!tipc_link_is_up(l))
+ return;
+ msg_set_next_sent(hdr, snd_nxt);
+
+ /* Override rcvgap if there are packets in deferred queue */
+ if (!skb_queue_empty(&l->deferdq))
+ rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+ if (rcvgap) {
+ msg_set_seq_gap(hdr, rcvgap);
+ l->stats.sent_nacks++;
+ }
+ msg_set_ack(hdr, rcv_last);
+ msg_set_probe(hdr, probe);
+ if (probe)
+ l->stats.sent_probes++;
+ l->stats.sent_states++;
} else {
- l_ptr->stats.duplicates++;
+ /* RESET_MSG or ACTIVATE_MSG */
+ msg_set_max_pkt(hdr, l->advertised_mtu);
+ msg_set_ack(hdr, l->rcv_nxt - 1);
+ msg_set_next_sent(hdr, 1);
}
+ skb = tipc_buf_acquire(msg_size(hdr));
+ if (!skb)
+ return;
+ skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+ skb->priority = TC_PRIO_CONTROL;
+ __skb_queue_tail(xmitq, skb);
}
-/*
- * Send protocol message to the other endpoint.
+/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
+ * with contents of the link's transmit and backlog queues.
*/
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
- u32 gap, u32 tolerance, u32 priority)
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ int mtyp, struct sk_buff_head *xmitq)
{
- struct sk_buff *buf = NULL;
- struct tipc_msg *msg = l_ptr->pmsg;
- u32 msg_size = sizeof(l_ptr->proto_msg);
- int r_flag;
- u16 last_rcv;
-
- /* Don't send protocol message during link failover */
- if (l_ptr->flags & LINK_FAILINGOVER)
- return;
+ struct sk_buff *skb, *tnlskb;
+ struct tipc_msg *hdr, tnlhdr;
+ struct sk_buff_head *queue = &l->transmq;
+ struct sk_buff_head tmpxq, tnlq;
+ u16 pktlen, pktcnt, seqno = l->snd_nxt;
- /* Abort non-RESET send if communication with node is prohibited */
- if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
+ if (!tnl)
return;
- /* Create protocol message with "out-of-sequence" sequence number */
- msg_set_type(msg, msg_typ);
- msg_set_net_plane(msg, l_ptr->net_plane);
- msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
-
- if (msg_typ == STATE_MSG) {
- u16 next_sent = l_ptr->snd_nxt;
+ skb_queue_head_init(&tnlq);
+ skb_queue_head_init(&tmpxq);
- if (!tipc_link_is_up(l_ptr))
+ /* At least one packet required for safe algorithm => add dummy */
+ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+ 0, 0, TIPC_ERR_NO_PORT);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+ skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, &tmpxq);
+ __skb_queue_purge(&tmpxq);
+
+ /* Initialize reusable tunnel packet header */
+ tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+ mtyp, INT_H_SIZE, l->addr);
+ pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+ msg_set_msgcnt(&tnlhdr, pktcnt);
+ msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
+tnl:
+ /* Wrap each packet into a tunnel packet */
+ skb_queue_walk(queue, skb) {
+ hdr = buf_msg(skb);
+ if (queue == &l->backlogq)
+ msg_set_seqno(hdr, seqno++);
+ pktlen = msg_size(hdr);
+ msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
+ tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+ if (!tnlskb) {
+ pr_warn("%sunable to send packet\n", link_co_err);
return;
- msg_set_next_sent(msg, next_sent);
- if (!skb_queue_empty(&l_ptr->deferdq)) {
- last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
- gap = mod(last_rcv - l_ptr->rcv_nxt);
}
- msg_set_seq_gap(msg, gap);
- if (gap)
- l_ptr->stats.sent_nacks++;
- msg_set_link_tolerance(msg, tolerance);
- msg_set_linkprio(msg, priority);
- msg_set_max_pkt(msg, l_ptr->mtu);
- msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
- msg_set_probe(msg, probe_msg != 0);
- if (probe_msg)
- l_ptr->stats.sent_probes++;
- l_ptr->stats.sent_states++;
- } else { /* RESET_MSG or ACTIVATE_MSG */
- msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
- msg_set_seq_gap(msg, 0);
- msg_set_next_sent(msg, 1);
- msg_set_probe(msg, 0);
- msg_set_link_tolerance(msg, l_ptr->tolerance);
- msg_set_linkprio(msg, l_ptr->priority);
- msg_set_max_pkt(msg, l_ptr->advertised_mtu);
+ skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
+ __skb_queue_tail(&tnlq, tnlskb);
+ }
+ if (queue != &l->backlogq) {
+ queue = &l->backlogq;
+ goto tnl;
}
- r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
- msg_set_redundant_link(msg, r_flag);
- msg_set_linkprio(msg, l_ptr->priority);
- msg_set_size(msg, msg_size);
-
- msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
-
- buf = tipc_buf_acquire(msg_size);
- if (!buf)
- return;
+ tipc_link_xmit(tnl, &tnlq, xmitq);
- skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
- buf->priority = TC_PRIO_CONTROL;
- tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
- &l_ptr->media_addr);
- l_ptr->rcv_unacked = 0;
- kfree_skb(buf);
+ if (mtyp == FAILOVER_MSG) {
+ tnl->drop_point = l->rcv_nxt;
+ tnl->failover_reasm_skb = l->reasm_buf;
+ l->reasm_buf = NULL;
+ }
}
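/* Illustrative sketch, not part of the patch: on failover the surviving
 * link tunnels the failed link's queued traffic. A caller (hypothetical
 * name) might drive it as below; for FAILOVER_MSG the function also hands
 * the reassembly state over to the tunnel link:
 */
static void example_failover(struct tipc_node *n, struct tipc_link *failed,
			     struct tipc_link *tnl, int bearer_id)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_tnl_prepare(failed, tnl, FAILOVER_MSG, &xmitq);
	tipc_bearer_xmit(n->net, bearer_id, &xmitq,
			 &n->links[bearer_id].maddr);
}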
-/*
- * Receive protocol message :
+/* tipc_link_proto_rcv(): receive link level protocol message:
* Note that network plane id propagates through the network, and may
- * change at any time. The node with lowest address rules
+ * change at any time. The node with the lowest numerical id determines
+ * the network plane.
*/
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
- struct sk_buff *buf)
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
- u32 rec_gap = 0;
- u32 msg_tol;
- struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_msg *hdr = buf_msg(skb);
+ u16 rcvgap = 0;
+ u16 nacked_gap = msg_seq_gap(hdr);
+ u16 peers_snd_nxt = msg_next_sent(hdr);
+ u16 peers_tol = msg_link_tolerance(hdr);
+ u16 peers_prio = msg_linkprio(hdr);
+ char *if_name;
+ int rc = 0;
- if (l_ptr->flags & LINK_FAILINGOVER)
+ if (tipc_link_is_blocked(l))
goto exit;
- if (l_ptr->net_plane != msg_net_plane(msg))
- if (link_own_addr(l_ptr) > msg_prevnode(msg))
- l_ptr->net_plane = msg_net_plane(msg);
-
- switch (msg_type(msg)) {
+ if (link_own_addr(l) > msg_prevnode(hdr))
+ l->net_plane = msg_net_plane(hdr);
+ switch (msg_type(hdr)) {
case RESET_MSG:
- if (!link_working_unknown(l_ptr) &&
- (l_ptr->peer_session != INVALID_SESSION)) {
- if (less_eq(msg_session(msg), l_ptr->peer_session))
- break; /* duplicate or old reset: ignore */
- }
-
- if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
- link_working_unknown(l_ptr))) {
- /*
- * peer has lost contact -- don't allow peer's links
- * to reactivate before we recognize loss & clean up
- */
- l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
- }
-
- link_state_event(l_ptr, RESET_MSG);
+ /* Ignore duplicate RESET with old session number */
+ if ((less_eq(msg_session(hdr), l->peer_session)) &&
+ (l->peer_session != WILDCARD_SESSION))
+ break;
/* fall thru' */
- case ACTIVATE_MSG:
- /* Update link settings according other endpoint's values */
- strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
- msg_tol = msg_link_tolerance(msg);
- if (msg_tol > l_ptr->tolerance)
- link_set_supervision_props(l_ptr, msg_tol);
-
- if (msg_linkprio(msg) > l_ptr->priority)
- l_ptr->priority = msg_linkprio(msg);
-
- if (l_ptr->mtu > msg_max_pkt(msg))
- l_ptr->mtu = msg_max_pkt(msg);
-
- /* Synchronize broadcast link info, if not done previously */
- if (!tipc_node_is_up(l_ptr->owner)) {
- l_ptr->owner->bclink.last_sent =
- l_ptr->owner->bclink.last_in =
- msg_last_bcast(msg);
- l_ptr->owner->bclink.oos_state = 0;
- }
-
- l_ptr->peer_session = msg_session(msg);
- l_ptr->peer_bearer_id = msg_bearer_id(msg);
-
- if (msg_type(msg) == ACTIVATE_MSG)
- link_state_event(l_ptr, ACTIVATE_MSG);
- break;
- case STATE_MSG:
+ case ACTIVATE_MSG:
- msg_tol = msg_link_tolerance(msg);
- if (msg_tol)
- link_set_supervision_props(l_ptr, msg_tol);
-
- if (msg_linkprio(msg) &&
- (msg_linkprio(msg) != l_ptr->priority)) {
- pr_debug("%s<%s>, priority change %u->%u\n",
- link_rst_msg, l_ptr->name,
- l_ptr->priority, msg_linkprio(msg));
- l_ptr->priority = msg_linkprio(msg);
- tipc_link_reset(l_ptr); /* Enforce change to take effect */
+ /* Complete own link name with peer's interface name */
+ if_name = strrchr(l->name, ':') + 1;
+ if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
break;
- }
-
- /* Record reception; force mismatch at next timeout: */
- l_ptr->silent_intv_cnt = 0;
-
- link_state_event(l_ptr, TRAFFIC_MSG_EVT);
- l_ptr->stats.recv_states++;
- if (link_reset_unknown(l_ptr))
+ if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
break;
+ strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);
- if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
- rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
+ /* Update own tolerance if peer indicates a non-zero value */
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ l->tolerance = peers_tol;
- if (msg_probe(msg))
- l_ptr->stats.recv_probes++;
+ /* Update own priority if peer's priority is higher */
+ if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+ l->priority = peers_prio;
- /* Protocol message before retransmits, reduce loss risk */
- if (l_ptr->owner->bclink.recv_permitted)
- tipc_bclink_update_link_state(l_ptr->owner,
- msg_last_bcast(msg));
-
- if (rec_gap || (msg_probe(msg))) {
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
- rec_gap, 0, 0);
- }
- if (msg_seq_gap(msg)) {
- l_ptr->stats.recv_nacks++;
- tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
- msg_seq_gap(msg));
+ if (msg_type(hdr) == RESET_MSG) {
+ rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+ } else if (!link_is_up(l)) {
+ tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+ rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
}
+ l->peer_session = msg_session(hdr);
+ l->peer_bearer_id = msg_bearer_id(hdr);
+ if (l->mtu > msg_max_pkt(hdr))
+ l->mtu = msg_max_pkt(hdr);
break;
- }
-exit:
- kfree_skb(buf);
-}
-
-
-/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
- * a different bearer. Owner node is locked.
- */
-static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
- struct tipc_msg *tunnel_hdr,
- struct tipc_msg *msg,
- u32 selector)
-{
- struct tipc_link *tunnel;
- struct sk_buff *skb;
- u32 length = msg_size(msg);
-
- tunnel = l_ptr->owner->active_links[selector & 1];
- if (!tipc_link_is_up(tunnel)) {
- pr_warn("%stunnel link no longer available\n", link_co_err);
- return;
- }
- msg_set_size(tunnel_hdr, length + INT_H_SIZE);
- skb = tipc_buf_acquire(length + INT_H_SIZE);
- if (!skb) {
- pr_warn("%sunable to send tunnel msg\n", link_co_err);
- return;
- }
- skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
- skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
- __tipc_link_xmit_skb(tunnel, skb);
-}
-
-
-/* tipc_link_failover_send_queue(): A link has gone down, but a second
- * link is still active. We can do failover. Tunnel the failing link's
- * whole send queue via the remaining link. This way, we don't lose
- * any packets, and sequence order is preserved for subsequent traffic
- * sent over the remaining link. Owner node is locked.
- */
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
-{
- int msgcount;
- struct tipc_link *tunnel = l_ptr->owner->active_links[0];
- struct tipc_msg tunnel_hdr;
- struct sk_buff *skb;
- int split_bundles;
-
- if (!tunnel)
- return;
- tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
- FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
-
- skb_queue_walk(&l_ptr->backlogq, skb) {
- msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
- l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
- }
- skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
- tipc_link_purge_backlog(l_ptr);
- msgcount = skb_queue_len(&l_ptr->transmq);
- msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
- msg_set_msgcnt(&tunnel_hdr, msgcount);
-
- if (skb_queue_empty(&l_ptr->transmq)) {
- skb = tipc_buf_acquire(INT_H_SIZE);
- if (skb) {
- skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
- msg_set_size(&tunnel_hdr, INT_H_SIZE);
- __tipc_link_xmit_skb(tunnel, skb);
- } else {
- pr_warn("%sunable to send changeover msg\n",
- link_co_err);
- }
- return;
- }
-
- split_bundles = (l_ptr->owner->active_links[0] !=
- l_ptr->owner->active_links[1]);
+ case STATE_MSG:
- skb_queue_walk(&l_ptr->transmq, skb) {
- struct tipc_msg *msg = buf_msg(skb);
+ /* Update own tolerance if peer indicates a non-zero value */
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ l->tolerance = peers_tol;
- if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
- struct tipc_msg *m = msg_get_wrapped(msg);
- unchar *pos = (unchar *)m;
+ l->silent_intv_cnt = 0;
+ l->stats.recv_states++;
+ if (msg_probe(hdr))
+ l->stats.recv_probes++;
+ rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+ if (!link_is_up(l))
+ break;
- msgcount = msg_msgcnt(msg);
- while (msgcount--) {
- msg_set_seqno(m, msg_seqno(msg));
- tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
- msg_link_selector(m));
- pos += align(msg_size(m));
- m = (struct tipc_msg *)pos;
- }
- } else {
- tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
- msg_link_selector(msg));
+ /* Send NACK if peer has sent pkts we haven't received yet */
+ if (more(peers_snd_nxt, l->rcv_nxt))
+ rcvgap = peers_snd_nxt - l->rcv_nxt;
+ if (rcvgap || (msg_probe(hdr)))
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
+ 0, 0, xmitq);
+ tipc_link_release_pkts(l, msg_ack(hdr));
+
+ /* If NACK, retransmit will now start at right position */
+ if (nacked_gap) {
+ rc = tipc_link_retransm(l, nacked_gap, xmitq);
+ l->stats.recv_nacks++;
}
- }
-}
-
-/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
- * duplicate of the first link's send queue via the new link. This way, we
- * are guaranteed that currently queued packets from a socket are delivered
- * before future traffic from the same socket, even if this is using the
- * new link. The last arriving copy of each duplicate packet is dropped at
- * the receiving end by the regular protocol check, so packet cardinality
- * and sequence order is preserved per sender/receiver socket pair.
- * Owner node is locked.
- */
-void tipc_link_dup_queue_xmit(struct tipc_link *link,
- struct tipc_link *tnl)
-{
- struct sk_buff *skb;
- struct tipc_msg tnl_hdr;
- struct sk_buff_head *queue = &link->transmq;
- int mcnt;
- u16 seqno;
-
- tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
- SYNCH_MSG, INT_H_SIZE, link->addr);
- mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
- msg_set_msgcnt(&tnl_hdr, mcnt);
- msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
-
-tunnel_queue:
- skb_queue_walk(queue, skb) {
- struct sk_buff *outskb;
- struct tipc_msg *msg = buf_msg(skb);
- u32 len = msg_size(msg);
- msg_set_ack(msg, mod(link->rcv_nxt - 1));
- msg_set_bcast_ack(msg, link->owner->bclink.last_in);
- msg_set_size(&tnl_hdr, len + INT_H_SIZE);
- outskb = tipc_buf_acquire(len + INT_H_SIZE);
- if (outskb == NULL) {
- pr_warn("%sunable to send duplicate msg\n",
- link_co_err);
- return;
- }
- skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
- skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
- skb->data, len);
- __tipc_link_xmit_skb(tnl, outskb);
- if (!tipc_link_is_up(link))
- return;
- }
- if (queue == &link->backlogq)
- return;
- seqno = link->snd_nxt;
- skb_queue_walk(&link->backlogq, skb) {
- msg_set_seqno(buf_msg(skb), seqno);
- seqno = mod(seqno + 1);
- }
- queue = &link->backlogq;
- goto tunnel_queue;
-}
-
-/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
- * Owner node is locked.
- */
-static bool tipc_link_failover_rcv(struct tipc_link *link,
- struct sk_buff **skb)
-{
- struct tipc_msg *msg = buf_msg(*skb);
- struct sk_buff *iskb = NULL;
- struct tipc_link *pl = NULL;
- int bearer_id = msg_bearer_id(msg);
- int pos = 0;
-
- if (msg_type(msg) != FAILOVER_MSG) {
- pr_warn("%sunknown tunnel pkt received\n", link_co_err);
- goto exit;
- }
- if (bearer_id >= MAX_BEARERS)
- goto exit;
-
- if (bearer_id == link->bearer_id)
- goto exit;
-
- pl = link->owner->links[bearer_id];
- if (pl && tipc_link_is_up(pl))
- tipc_link_reset(pl);
-
- if (link->failover_pkts == FIRST_FAILOVER)
- link->failover_pkts = msg_msgcnt(msg);
-
- /* Should we expect an inner packet? */
- if (!link->failover_pkts)
- goto exit;
-
- if (!tipc_msg_extract(*skb, &iskb, &pos)) {
- pr_warn("%sno inner failover pkt\n", link_co_err);
- *skb = NULL;
- goto exit;
- }
- link->failover_pkts--;
- *skb = NULL;
-
- /* Was this packet already delivered? */
- if (less(buf_seqno(iskb), link->failover_checkpt)) {
- kfree_skb(iskb);
- iskb = NULL;
- goto exit;
- }
- if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
- link->stats.recv_fragments++;
- tipc_buf_append(&link->failover_skb, &iskb);
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
}
exit:
- if (!link->failover_pkts && pl)
- pl->flags &= ~LINK_FAILINGOVER;
- kfree_skb(*skb);
- *skb = iskb;
- return *skb;
-}
-
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
-{
- unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
-
- if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
- return;
-
- l_ptr->tolerance = tol;
- l_ptr->keepalive_intv = msecs_to_jiffies(intv);
- l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
+ kfree_skb(skb);
+ return rc;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1743,7 +1449,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
tipc_node_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
- l_ptr = n_ptr->links[i];
+ l_ptr = n_ptr->links[i].link;
if (l_ptr && !strcmp(l_ptr->name, link_name)) {
*bearer_id = i;
found_node = n_ptr;
@@ -1770,27 +1476,16 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}
-static void link_print(struct tipc_link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l, const char *str)
{
- struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
- struct tipc_bearer *b_ptr;
-
- rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
- if (b_ptr)
- pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
- rcu_read_unlock();
-
- if (link_working_unknown(l_ptr))
- pr_cont(":WU\n");
- else if (link_reset_reset(l_ptr))
- pr_cont(":RR\n");
- else if (link_reset_unknown(l_ptr))
- pr_cont(":RU\n");
- else if (link_working_working(l_ptr))
- pr_cont(":WW\n");
- else
- pr_cont("\n");
+ struct sk_buff *hskb = skb_peek(&l->transmq);
+ u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+ u16 tail = l->snd_nxt - 1;
+
+ pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
+ pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
+ skb_queue_len(&l->transmq), head, tail,
+ skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
@@ -1865,7 +1560,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (!link) {
res = -EINVAL;
goto out;
@@ -1885,7 +1580,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
u32 tol;
tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
- link_set_supervision_props(link, tol);
+ link->tolerance = tol;
tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
}
if (props[TIPC_NLA_PROP_PRIO]) {
@@ -2055,10 +1750,11 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
for (i = *prev_link; i < MAX_BEARERS; i++) {
*prev_link = i;
- if (!node->links[i])
+ if (!node->links[i].link)
continue;
- err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
+ err = __tipc_nl_add_link(net, msg,
+ node->links[i].link, NLM_F_MULTI);
if (err)
return err;
}
@@ -2172,7 +1868,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (!link) {
tipc_node_unlock(node);
nlmsg_free(msg.skb);
@@ -2227,7 +1923,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (!link) {
tipc_node_unlock(node);
return -EINVAL;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index ae0a0ea572f2..39ff8b6919a4 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -49,19 +49,25 @@
*/
#define INVALID_LINK_SEQ 0x10000
-/* Link working states
+/* Link FSM events:
*/
-#define WORKING_WORKING 560810u
-#define WORKING_UNKNOWN 560811u
-#define RESET_UNKNOWN 560812u
-#define RESET_RESET 560813u
+enum {
+ LINK_ESTABLISH_EVT = 0xec1ab1e,
+ LINK_PEER_RESET_EVT = 0x9eed0e,
+ LINK_FAILURE_EVT = 0xfa110e,
+ LINK_RESET_EVT = 0x10ca1d0e,
+ LINK_FAILOVER_BEGIN_EVT = 0xfa110bee,
+ LINK_FAILOVER_END_EVT = 0xfa110ede,
+ LINK_SYNCH_BEGIN_EVT = 0xc1ccbee,
+ LINK_SYNCH_END_EVT = 0xc1ccede
+};
-/* Link endpoint execution states
+/* Events returned from link at packet reception or at timeout
*/
-#define LINK_STARTED 0x0001
-#define LINK_STOPPED 0x0002
-#define LINK_SYNCHING 0x0004
-#define LINK_FAILINGOVER 0x0008
+enum {
+ TIPC_LINK_UP_EVT = 1,
+ TIPC_LINK_DOWN_EVT = (1 << 1)
+};
/* Starting value for maximum packet size negotiation on unicast links
* (unless bearer MTU is less)
@@ -106,7 +112,6 @@ struct tipc_stats {
* @timer: link timer
* @owner: pointer to peer node
* @refcnt: reference counter for permanent references (owner node & timer)
- * @flags: execution state flags for link endpoint instance
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
@@ -143,20 +148,17 @@ struct tipc_stats {
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
- struct tipc_media_addr media_addr;
- struct timer_list timer;
+ struct tipc_media_addr *media_addr;
struct tipc_node *owner;
- struct kref ref;
/* Management and link supervision data */
- unsigned int flags;
u32 peer_session;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
unsigned long keepalive_intv;
u32 abort_limit;
- int state;
+ u32 state;
u32 silent_intv_cnt;
struct {
unchar hdr[INT_H_SIZE];
@@ -165,12 +167,10 @@ struct tipc_link {
struct tipc_msg *pmsg;
u32 priority;
char net_plane;
- u16 synch_point;
- /* Failover */
- u16 failover_pkts;
- u16 failover_checkpt;
- struct sk_buff *failover_skb;
+ /* Failover/synch */
+ u16 drop_point;
+ struct sk_buff *failover_reasm_skb;
/* Max packet negotiation */
u16 mtu;
@@ -192,8 +192,8 @@ struct tipc_link {
u16 rcv_nxt;
u32 rcv_unacked;
struct sk_buff_head deferdq;
- struct sk_buff_head inputq;
- struct sk_buff_head namedq;
+ struct sk_buff_head *inputq;
+ struct sk_buff_head *namedq;
/* Congestion handling */
struct sk_buff_head wakeupq;
@@ -205,28 +205,29 @@ struct tipc_link {
struct tipc_stats stats;
};
-struct tipc_port;
-
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
- struct tipc_bearer *b_ptr,
- const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+ u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+ struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+ struct tipc_link **link);
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+int tipc_link_fsm_evt(struct tipc_link *l, int evt);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
-int tipc_link_is_up(struct tipc_link *l_ptr);
+bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_synching(struct tipc_link *l);
+bool tipc_link_is_failingover(struct tipc_link *l);
+bool tipc_link_is_blocked(struct tipc_link *l);
int tipc_link_is_active(struct tipc_link *l_ptr);
void tipc_link_purge_queues(struct tipc_link *l_ptr);
void tipc_link_purge_backlog(struct tipc_link *l);
-void tipc_link_reset_all(struct tipc_node *node);
void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
- u32 selector);
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
- u32 selector);
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
struct sk_buff_head *list);
+int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
+ struct sk_buff_head *xmitq);
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
u32 gap, u32 tolerance, u32 priority);
void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -242,34 +243,8 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
-void link_prepare_wakeup(struct tipc_link *l);
-
-static inline u32 link_own_addr(struct tipc_link *l)
-{
- return msg_prevnode(l->pmsg);
-}
-
-/*
- * Link status checking routines
- */
-static inline int link_working_working(struct tipc_link *l_ptr)
-{
- return l_ptr->state == WORKING_WORKING;
-}
-
-static inline int link_working_unknown(struct tipc_link *l_ptr)
-{
- return l_ptr->state == WORKING_UNKNOWN;
-}
-
-static inline int link_reset_unknown(struct tipc_link *l_ptr)
-{
- return l_ptr->state == RESET_UNKNOWN;
-}
-
-static inline int link_reset_reset(struct tipc_link *l_ptr)
-{
- return l_ptr->state == RESET_RESET;
-}
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 08b4cc7d496d..562c926a51cc 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -463,60 +463,72 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
- * @buf: buffer containing message to be reversed
- * @dnode: return value: node where to send message after reversal
- * @err: error code to be set in message
- * Consumes buffer if failure
+ * @own_node: originating node id for reversed message
+ * @skb: buffer containing message to be reversed; may be replaced.
+ * @err: error code to be set in message, if any
+ * Consumes buffer on failure
* Returns true if success, otherwise false
*/
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
- int err)
+bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
- struct tipc_msg *msg = buf_msg(buf);
+ struct sk_buff *_skb = *skb;
+ struct tipc_msg *hdr = buf_msg(_skb);
struct tipc_msg ohdr;
- uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+ int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
- if (skb_linearize(buf))
+ if (skb_linearize(_skb))
goto exit;
- msg = buf_msg(buf);
- if (msg_dest_droppable(msg))
+ hdr = buf_msg(_skb);
+ if (msg_dest_droppable(hdr))
goto exit;
- if (msg_errcode(msg))
+ if (msg_errcode(hdr))
goto exit;
- memcpy(&ohdr, msg, msg_hdr_sz(msg));
- msg_set_errcode(msg, err);
- msg_set_origport(msg, msg_destport(&ohdr));
- msg_set_destport(msg, msg_origport(&ohdr));
- msg_set_prevnode(msg, own_addr);
- if (!msg_short(msg)) {
- msg_set_orignode(msg, msg_destnode(&ohdr));
- msg_set_destnode(msg, msg_orignode(&ohdr));
+
+ /* Take a copy of original header before altering message */
+ memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
+
+ /* Never return SHORT header; expand by replacing buffer if necessary */
+ if (msg_short(hdr)) {
+ *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+ if (!*skb)
+ goto exit;
+ memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
+ kfree_skb(_skb);
+ _skb = *skb;
+ hdr = buf_msg(_skb);
+ memcpy(hdr, &ohdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
}
- msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
- skb_trim(buf, msg_size(msg));
- skb_orphan(buf);
- *dnode = msg_orignode(&ohdr);
+
+	/* Now reverse the relevant fields */
+ msg_set_errcode(hdr, err);
+ msg_set_origport(hdr, msg_destport(&ohdr));
+ msg_set_destport(hdr, msg_origport(&ohdr));
+ msg_set_destnode(hdr, msg_prevnode(&ohdr));
+ msg_set_prevnode(hdr, own_node);
+ msg_set_orignode(hdr, own_node);
+ msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
+ skb_trim(_skb, msg_size(hdr));
+ skb_orphan(_skb);
return true;
exit:
- kfree_skb(buf);
- *dnode = 0;
+ kfree_skb(_skb);
+ *skb = NULL;
return false;
}
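/* Illustrative sketch, not part of the patch: callers that used to receive
 * the destination via the old *dnode out-parameter now read it from the
 * reversed header itself. Hypothetical rejection helper:
 */
static void example_reject(struct net *net, struct sk_buff *skb, int err)
{
	u32 onode = tipc_own_addr(net);

	if (tipc_msg_reverse(onode, &skb, err))
		tipc_node_xmit_skb(net, skb, msg_destnode(buf_msg(skb)), 0);
}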
/**
* tipc_msg_lookup_dest(): try to find new destination for named message
* @skb: the buffer containing the message.
- * @dnode: return value: next-hop node, if destination found
- * @err: return value: error code to use, if message to be rejected
+ * @err: error code to be used by caller if lookup fails
* Does not consume buffer
* Returns true if a destination is found, false otherwise
*/
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
- u32 *dnode, int *err)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
struct tipc_msg *msg = buf_msg(skb);
- u32 dport;
- u32 own_addr = tipc_own_addr(net);
+ u32 dport, dnode;
+ u32 onode = tipc_own_addr(net);
if (!msg_isdata(msg))
return false;
@@ -529,15 +541,15 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
return false;
if (msg_reroute_cnt(msg))
return false;
- *dnode = addr_domain(net, msg_lookup_scope(msg));
+ dnode = addr_domain(net, msg_lookup_scope(msg));
dport = tipc_nametbl_translate(net, msg_nametype(msg),
- msg_nameinst(msg), dnode);
+ msg_nameinst(msg), &dnode);
if (!dport)
return false;
msg_incr_reroute_cnt(msg);
- if (*dnode != own_addr)
- msg_set_prevnode(msg, own_addr);
- msg_set_destnode(msg, *dnode);
+ if (dnode != onode)
+ msg_set_prevnode(msg, onode);
+ msg_set_destnode(msg, dnode);
msg_set_destport(msg, dport);
*err = TIPC_OK;
return true;
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 19c45fb66238..a82c5848d4bc 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -38,6 +38,7 @@
#define _TIPC_MSG_H
#include <linux/tipc.h>
+#include "core.h"
/*
* Constants and routines used to read and write TIPC payload message headers
@@ -109,7 +110,6 @@ struct tipc_skb_cb {
struct sk_buff *tail;
bool validated;
bool wakeup_pending;
- bool bundling;
u16 chain_sz;
u16 chain_imp;
};
@@ -558,15 +558,6 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
msg_set_bits(m, 1, 15, 0x1fff, n);
}
-static inline bool msg_dup(struct tipc_msg *m)
-{
- if (likely(msg_user(m) != TUNNEL_PROTOCOL))
- return false;
- if (msg_type(m) != SYNCH_MSG)
- return false;
- return true;
-}
-
/*
* Word 2
*/
@@ -620,12 +611,12 @@ static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
}
-static inline u32 msg_next_sent(struct tipc_msg *m)
+static inline u16 msg_next_sent(struct tipc_msg *m)
{
return msg_bits(m, 4, 0, 0xffff);
}
-static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 4, 0, 0xffff, n);
}
@@ -658,12 +649,12 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
/*
* Word 5
*/
-static inline u32 msg_session(struct tipc_msg *m)
+static inline u16 msg_session(struct tipc_msg *m)
{
return msg_bits(m, 5, 16, 0xffff);
}
-static inline void msg_set_session(struct tipc_msg *m, u32 n)
+static inline void msg_set_session(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 5, 16, 0xffff, n);
}
@@ -726,12 +717,12 @@ static inline char *msg_media_addr(struct tipc_msg *m)
/*
* Word 9
*/
-static inline u32 msg_msgcnt(struct tipc_msg *m)
+static inline u16 msg_msgcnt(struct tipc_msg *m)
{
return msg_bits(m, 9, 16, 0xffff);
}
-static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 9, 16, 0xffff, n);
}
@@ -766,10 +757,25 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 0, 0xffff, n);
}
+static inline bool msg_peer_link_is_up(struct tipc_msg *m)
+{
+ if (likely(msg_user(m) != LINK_PROTOCOL))
+ return true;
+ if (msg_type(m) == STATE_MSG)
+ return true;
+ return false;
+}
+
+static inline bool msg_peer_node_is_up(struct tipc_msg *m)
+{
+ if (msg_peer_link_is_up(m))
+ return true;
+ return msg_redundant_link(m);
+}
+
struct sk_buff *tipc_buf_acquire(u32 size);
bool tipc_msg_validate(struct sk_buff *skb);
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
- int err);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
u32 hsize, u32 destnode);
struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
@@ -782,8 +788,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
- int *err);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
static inline u16 buf_seqno(struct sk_buff *skb)
@@ -857,26 +862,65 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
return skb;
}
-/* tipc_skb_queue_tail(): add buffer to tail of list;
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
* @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
+ * @skb: buffer to add
+ * Returns true if the queue should be treated further, otherwise false
*/
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
- struct sk_buff *skb, u32 dport)
+static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
+ struct sk_buff *skb)
{
- struct sk_buff *_skb = NULL;
- bool rv = false;
+ struct sk_buff *_skb, *tmp;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u16 seqno = msg_seqno(hdr);
- spin_lock_bh(&list->lock);
- _skb = skb_peek_tail(list);
- if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
- (skb_queue_len(list) > 32))
- rv = true;
+ if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
+ __skb_queue_head(list, skb);
+ return true;
+ }
+ if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
+ __skb_queue_head(list, skb);
+ return true;
+ }
+ if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
+ skb_queue_walk_safe(list, _skb, tmp) {
+ if (likely(less(seqno, buf_seqno(_skb)))) {
+ __skb_queue_before(list, _skb, skb);
+ return true;
+ }
+ }
+ }
__skb_queue_tail(list, skb);
+ return false;
+}
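/* Illustrative walk-through, not part of the patch: assume rcv_nxt == 4 and
 * deferdq already holds seqno 5. An arriving 6 lands past the tail and the
 * function returns false, so tipc_link_rcv() only answers with a NACK every
 * TIPC_NACK_INTV such packets. An arriving 4 is sorted in at the head and
 * returns true, after which the caller drains 4, 5 and 6 in sequence.
 */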
+
+/* tipc_skb_queue_splice_tail - append an skb list to lock protected list
+ * @list: the new list to append. Not lock protected
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ spin_lock_bh(&head->lock);
+ skb_queue_splice_tail(list, head);
+ spin_unlock_bh(&head->lock);
+}
+
+/* tipc_skb_queue_splice_tail_init - merge two lock protected skb lists
+ * @list: the new list to add. Lock protected. Will be reinitialized
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ struct sk_buff_head tmp;
+
+ __skb_queue_head_init(&tmp);
+
+ spin_lock_bh(&list->lock);
+ skb_queue_splice_tail_init(list, &tmp);
spin_unlock_bh(&list->lock);
- return rv;
+ tipc_skb_queue_splice_tail(&tmp, head);
}
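/* Design note, illustrative: bouncing through the on-stack 'tmp' queue lets
 * the two spinlocks be taken one at a time instead of nested, so no global
 * ordering between arbitrary source and target queue locks is required.
 */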
#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 41e7b7e4dda0..e6018b7eb197 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -96,13 +96,13 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
dnode = node->addr;
if (in_own_node(net, dnode))
continue;
- if (!tipc_node_active_links(node))
+ if (!tipc_node_is_up(node))
continue;
oskb = pskb_copy(skb, GFP_ATOMIC);
if (!oskb)
break;
msg_set_destnode(buf_msg(oskb), dnode);
- tipc_link_xmit_skb(net, oskb, dnode, dnode);
+ tipc_node_xmit_skb(net, oskb, dnode, dnode);
}
rcu_read_unlock();
@@ -223,7 +223,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
&tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
rcu_read_unlock();
- tipc_link_xmit(net, &head, dnode, dnode);
+ tipc_node_xmit(net, &head, dnode, dnode);
}
static void tipc_publ_subscribe(struct net *net, struct publication *publ,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0b1d61a5f853..7c191641b44f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -40,10 +40,42 @@
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
+#include "discover.h"
-static void node_lost_contact(struct tipc_node *n_ptr);
+/* Node FSM states and events:
+ */
+enum {
+ SELF_DOWN_PEER_DOWN = 0xdd,
+ SELF_UP_PEER_UP = 0xaa,
+ SELF_DOWN_PEER_LEAVING = 0xd1,
+ SELF_UP_PEER_COMING = 0xac,
+ SELF_COMING_PEER_UP = 0xca,
+ SELF_LEAVING_PEER_DOWN = 0x1d,
+ NODE_FAILINGOVER = 0xf0,
+ NODE_SYNCHING = 0xcc
+};
+
+enum {
+ SELF_ESTABL_CONTACT_EVT = 0xece,
+ SELF_LOST_CONTACT_EVT = 0x1ce,
+ PEER_ESTABL_CONTACT_EVT = 0x9ece,
+ PEER_LOST_CONTACT_EVT = 0x91ce,
+ NODE_FAILOVER_BEGIN_EVT = 0xfbe,
+ NODE_FAILOVER_END_EVT = 0xfee,
+ NODE_SYNCH_BEGIN_EVT = 0xcbe,
+ NODE_SYNCH_END_EVT = 0xcee
+};
+
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr **maddr);
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
+ bool delete);
+static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void node_established_contact(struct tipc_node *n_ptr);
static void tipc_node_delete(struct tipc_node *node);
+static void tipc_node_timeout(unsigned long data);
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
struct tipc_sock_conn {
u32 port;
@@ -110,7 +142,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
return NULL;
}
-struct tipc_node *tipc_node_create(struct net *net, u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n_ptr, *temp_node;
@@ -126,12 +158,14 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
}
n_ptr->addr = addr;
n_ptr->net = net;
+ n_ptr->capabilities = capabilities;
kref_init(&n_ptr->kref);
spin_lock_init(&n_ptr->lock);
INIT_HLIST_NODE(&n_ptr->hash);
INIT_LIST_HEAD(&n_ptr->list);
INIT_LIST_HEAD(&n_ptr->publ_list);
INIT_LIST_HEAD(&n_ptr->conn_sks);
+ skb_queue_head_init(&n_ptr->bclink.namedq);
__skb_queue_head_init(&n_ptr->bclink.deferdq);
hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
@@ -139,14 +173,32 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
break;
}
list_add_tail_rcu(&n_ptr->list, &temp_node->list);
- n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
+ n_ptr->state = SELF_DOWN_PEER_LEAVING;
n_ptr->signature = INVALID_NODE_SIG;
+ n_ptr->active_links[0] = INVALID_BEARER_ID;
+ n_ptr->active_links[1] = INVALID_BEARER_ID;
tipc_node_get(n_ptr);
+ setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+ n_ptr->keepalive_intv = U32_MAX;
exit:
spin_unlock_bh(&tn->node_list_lock);
return n_ptr;
}
+static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
+{
+ unsigned long tol = l->tolerance;
+ unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+ unsigned long keepalive_intv = msecs_to_jiffies(intv);
+
+ /* Link with lowest tolerance determines timer interval */
+ if (keepalive_intv < n->keepalive_intv)
+ n->keepalive_intv = keepalive_intv;
+
+ /* Ensure link's abort limit corresponds to current interval */
+ l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
+}
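/* Worked example, illustrative, assuming this link sets the node timer:
 * with l->tolerance == 1500 ms the interval is min(1500 / 4, 500) = 375 ms
 * and abort_limit becomes 1500 / 375 = 4 silent intervals; a 4000 ms
 * tolerance is capped at a 500 ms interval, giving abort_limit == 8.
 */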
+
static void tipc_node_delete(struct tipc_node *node)
{
list_del_rcu(&node->list);
@@ -160,8 +212,11 @@ void tipc_node_stop(struct net *net)
struct tipc_node *node, *t_node;
spin_lock_bh(&tn->node_list_lock);
- list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+ list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
+ if (del_timer(&node->timer))
+ tipc_node_put(node);
tipc_node_put(node);
+ }
spin_unlock_bh(&tn->node_list_lock);
}
@@ -219,158 +274,547 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
tipc_node_put(node);
}
+/* tipc_node_timeout - handle expiration of node timer
+ */
+static void tipc_node_timeout(unsigned long data)
+{
+ struct tipc_node *n = (struct tipc_node *)data;
+ struct tipc_link_entry *le;
+ struct sk_buff_head xmitq;
+ int bearer_id;
+ int rc = 0;
+
+ __skb_queue_head_init(&xmitq);
+
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ tipc_node_lock(n);
+ le = &n->links[bearer_id];
+ if (le->link) {
+ /* Link tolerance may change asynchronously: */
+ tipc_node_calculate_timer(n, le->link);
+ rc = tipc_link_timeout(le->link, &xmitq);
+ }
+ tipc_node_unlock(n);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+ if (rc & TIPC_LINK_DOWN_EVT)
+ tipc_node_link_down(n, bearer_id, false);
+ }
+ if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+ tipc_node_get(n);
+ tipc_node_put(n);
+}
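The get/put pair at the end is the usual "one reference per pending timer" pattern; the same two lines, annotated:

	/* mod_timer() returns 0 if the timer was not pending, i.e. this
	 * rearm creates a new pending instance => take an extra ref */
	if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
		tipc_node_get(n);
	/* drop the ref held on behalf of the instance that just fired */
	tipc_node_put(n);

tipc_node_stop() mirrors this: a successful del_timer() means a pending instance was cancelled, so it drops the timer's reference before dropping the list's.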
+
/**
- * tipc_node_link_up - handle addition of link
- *
+ * __tipc_node_link_up - handle addition of link
+ * Node lock must be held by caller
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
+ struct sk_buff_head *xmitq)
{
- struct tipc_link **active = &n_ptr->active_links[0];
+ int *slot0 = &n->active_links[0];
+ int *slot1 = &n->active_links[1];
+ struct tipc_link *ol = node_active_link(n, 0);
+ struct tipc_link *nl = n->links[bearer_id].link;
- n_ptr->working_links++;
- n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
- n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+ if (!nl || !tipc_link_is_up(nl))
+ return;
- pr_debug("Established link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
+ n->working_links++;
+ n->action_flags |= TIPC_NOTIFY_LINK_UP;
+ n->link_id = nl->peer_bearer_id << 16 | bearer_id;
- if (!active[0]) {
- active[0] = active[1] = l_ptr;
- node_established_contact(n_ptr);
- goto exit;
- }
- if (l_ptr->priority < active[0]->priority) {
- pr_debug("New link <%s> becomes standby\n", l_ptr->name);
- goto exit;
+ /* Leave room for tunnel header when returning 'mtu' to users: */
+ n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
+
+ tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+
+ pr_debug("Established link <%s> on network plane %c\n",
+ nl->name, nl->net_plane);
+
+ /* First link? => give it both slots */
+ if (!ol) {
+ *slot0 = bearer_id;
+ *slot1 = bearer_id;
+ tipc_link_build_bcast_sync_msg(nl, xmitq);
+ node_established_contact(n);
+ return;
}
- tipc_link_dup_queue_xmit(active[0], l_ptr);
- if (l_ptr->priority == active[0]->priority) {
- active[0] = l_ptr;
- goto exit;
+
+ /* Second link => redistribute slots */
+ if (nl->priority > ol->priority) {
+ pr_debug("Old link <%s> becomes standby\n", ol->name);
+ *slot0 = bearer_id;
+ *slot1 = bearer_id;
+ } else if (nl->priority == ol->priority) {
+ *slot0 = bearer_id;
+ } else {
+ pr_debug("New link <%s> is standby\n", nl->name);
}
- pr_debug("Old link <%s> becomes standby\n", active[0]->name);
- if (active[1] != active[0])
- pr_debug("Old link <%s> becomes standby\n", active[1]->name);
- active[0] = active[1] = l_ptr;
-exit:
- /* Leave room for changeover header when returning 'mtu' to users: */
- n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
- n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+
+ /* Prepare synchronization with first link */
+ tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
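The two active_links[] slots act as a small priority table. A sketch of the invariant the code above maintains (comments only, not kernel code):

	/* slot0 == slot1 == b        : one link up, or one link of strictly
	 *                              higher priority than the others
	 * slot0 != slot1             : two links of equal priority are up;
	 *                              traffic is split via 'selector & 1'
	 * slot0 == INVALID_BEARER_ID : no link up => node is down */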
/**
- * node_select_active_links - select active link
+ * tipc_node_link_up - handle addition of link
+ *
+ * Link becomes active (alone or shared) or standby, depending on its priority.
*/
-static void node_select_active_links(struct tipc_node *n_ptr)
+static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
+ struct sk_buff_head *xmitq)
{
- struct tipc_link **active = &n_ptr->active_links[0];
- u32 i;
- u32 highest_prio = 0;
+ tipc_node_lock(n);
+ __tipc_node_link_up(n, bearer_id, xmitq);
+ tipc_node_unlock(n);
+}
- active[0] = active[1] = NULL;
+/**
+ * __tipc_node_link_down - handle loss of link
+ */
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr **maddr)
+{
+ struct tipc_link_entry *le = &n->links[*bearer_id];
+ int *slot0 = &n->active_links[0];
+ int *slot1 = &n->active_links[1];
+ int i, highest = 0;
+ struct tipc_link *l, *_l, *tnl;
+
+ l = n->links[*bearer_id].link;
+ if (!l || tipc_link_is_reset(l))
+ return;
- for (i = 0; i < MAX_BEARERS; i++) {
- struct tipc_link *l_ptr = n_ptr->links[i];
+ n->working_links--;
+ n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+ n->link_id = l->peer_bearer_id << 16 | *bearer_id;
- if (!l_ptr || !tipc_link_is_up(l_ptr) ||
- (l_ptr->priority < highest_prio))
- continue;
+ tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
+
+ pr_debug("Lost link <%s> on network plane %c\n",
+ l->name, l->net_plane);
- if (l_ptr->priority > highest_prio) {
- highest_prio = l_ptr->priority;
- active[0] = active[1] = l_ptr;
- } else {
- active[1] = l_ptr;
+ /* Select new active link if any available */
+ *slot0 = INVALID_BEARER_ID;
+ *slot1 = INVALID_BEARER_ID;
+ for (i = 0; i < MAX_BEARERS; i++) {
+ _l = n->links[i].link;
+ if (!_l || !tipc_link_is_up(_l))
+ continue;
+ if (_l == l)
+ continue;
+ if (_l->priority < highest)
+ continue;
+ if (_l->priority > highest) {
+ highest = _l->priority;
+ *slot0 = i;
+ *slot1 = i;
+ continue;
}
+ *slot1 = i;
+ }
+
+ if (!tipc_node_is_up(n)) {
+ tipc_link_reset(l);
+ node_lost_contact(n, &le->inputq);
+ return;
}
+
+ /* There is still a working link => initiate failover */
+ tnl = node_active_link(n, 0);
+ n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
+ tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+ tipc_link_reset(l);
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+ *maddr = &n->links[tnl->bearer_id].maddr;
+ *bearer_id = tnl->bearer_id;
}
-/**
- * tipc_node_link_down - handle loss of link
- */
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
- struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
- struct tipc_link **active;
+ struct tipc_link_entry *le = &n->links[bearer_id];
+ struct tipc_media_addr *maddr;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_node_lock(n);
+ __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+ if (delete && le->link) {
+ kfree(le->link);
+ le->link = NULL;
+ n->link_cnt--;
+ }
+ tipc_node_unlock(n);
- n_ptr->working_links--;
- n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
- n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ tipc_sk_rcv(n->net, &le->inputq);
+}
- if (!tipc_link_is_active(l_ptr)) {
- pr_debug("Lost standby link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
- return;
- }
- pr_debug("Lost link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
-
- active = &n_ptr->active_links[0];
- if (active[0] == l_ptr)
- active[0] = active[1];
- if (active[1] == l_ptr)
- active[1] = active[0];
- if (active[0] == l_ptr)
- node_select_active_links(n_ptr);
- if (tipc_node_is_up(n_ptr))
- tipc_link_failover_send_queue(l_ptr);
- else
- node_lost_contact(n_ptr);
-
- /* Leave room for changeover header when returning 'mtu' to users: */
- if (active[0]) {
- n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
- n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+bool tipc_node_is_up(struct tipc_node *n)
+{
+ return n->active_links[0] != INVALID_BEARER_ID;
+}
+
+void tipc_node_check_dest(struct net *net, u32 onode,
+ struct tipc_bearer *b,
+ u16 capabilities, u32 signature,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr)
+{
+ struct tipc_node *n;
+ struct tipc_link *l;
+ struct tipc_link_entry *le;
+ bool addr_match = false;
+ bool sign_match = false;
+ bool link_up = false;
+ bool accept_addr = false;
+ bool reset = true;
+
+ *dupl_addr = false;
+ *respond = false;
+
+ n = tipc_node_create(net, onode, capabilities);
+ if (!n)
return;
+
+ tipc_node_lock(n);
+
+ le = &n->links[b->identity];
+
+ /* Prepare to validate requesting node's signature and media address */
+ l = le->link;
+ link_up = l && tipc_link_is_up(l);
+ addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+ sign_match = (signature == n->signature);
+
+ /* These three flags give us eight permutations: */
+
+ if (sign_match && addr_match && link_up) {
+ /* All is fine. Do nothing. */
+ reset = false;
+ } else if (sign_match && addr_match && !link_up) {
+ /* Respond. The link will come up in due time */
+ *respond = true;
+ } else if (sign_match && !addr_match && link_up) {
+ /* Peer has changed i/f address without rebooting.
+ * If so, the link will reset soon, and the next
+ * discovery will be accepted. So we can ignore it.
+ * It may also be a cloned or malicious peer having
+ * chosen the same node address and signature as an
+ * existing one.
+ * Ignore requests until the link goes down, if ever.
+ */
+ *dupl_addr = true;
+ } else if (sign_match && !addr_match && !link_up) {
+ /* Peer link has changed i/f address without rebooting.
+ * It may also be a cloned or malicious peer; we can't
+ * distinguish between the two.
+ * The signature is correct, so we must accept.
+ */
+ accept_addr = true;
+ *respond = true;
+ } else if (!sign_match && addr_match && link_up) {
+ /* Peer node rebooted. Two possibilities:
+ * - Delayed re-discovery; this link endpoint has already
+ * reset and re-established contact with the peer, before
+ * receiving a discovery message from that node.
+ * (The peer happened to receive one from this node first).
+ * - The peer came back so fast that our side has not
+ * discovered it yet. Probing from this side will soon
+ * reset the link, since there can be no working link
+ * endpoint at the peer end, and the link will re-establish.
+ * Accept the signature, since it comes from a known peer.
+ */
+ n->signature = signature;
+ } else if (!sign_match && addr_match && !link_up) {
+ /* The peer node has rebooted.
+ * Accept signature, since it is a known peer.
+ */
+ n->signature = signature;
+ *respond = true;
+ } else if (!sign_match && !addr_match && link_up) {
+ /* Peer rebooted with new address, or a new/duplicate peer.
+ * Ignore until the link goes down, if ever.
+ */
+ *dupl_addr = true;
+ } else if (!sign_match && !addr_match && !link_up) {
+ /* Peer rebooted with new address, or it is a new peer.
+ * Accept signature and address.
+ */
+ n->signature = signature;
+ accept_addr = true;
+ *respond = true;
}
- /* Loopback link went down? No fragmentation needed from now on. */
- if (n_ptr->addr == tn->own_addr) {
- n_ptr->act_mtus[0] = MAX_MSG_SIZE;
- n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+
+ if (!accept_addr)
+ goto exit;
+
+ /* Create a new link if one does not already exist */
+ if (!l) {
+ if (n->link_cnt == 2) {
+ pr_warn("Cannot establish 3rd link to %x\n", n->addr);
+ goto exit;
+ }
+ if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
+ tipc_own_addr(net), onode, &le->maddr,
+ &le->inputq, &n->bclink.namedq, &l)) {
+ *respond = false;
+ goto exit;
+ }
+ tipc_link_reset(l);
+ le->link = l;
+ n->link_cnt++;
+ tipc_node_calculate_timer(n, l);
+ if (n->link_cnt == 1)
+ if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+ tipc_node_get(n);
}
+ memcpy(&le->maddr, maddr, sizeof(*maddr));
+exit:
+ tipc_node_unlock(n);
+ if (reset)
+ tipc_node_link_down(n, b->identity, false);
+ tipc_node_put(n);
}
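The if/else ladder over (sign_match, addr_match, link_up) is easier to audit as a table; condensed from the branches above:

	sign  addr  link | action
	  ok    ok    up | nothing - all consistent
	  ok    ok  down | respond; link comes up on its own
	  ok   bad    up | ignore, flag dupl_addr (wait for link reset)
	  ok   bad  down | accept new address, respond
	 bad    ok    up | peer rebooted; adopt new signature
	 bad    ok  down | adopt new signature, respond
	 bad   bad    up | ignore, flag dupl_addr
	 bad   bad  down | adopt signature and address, respond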
-int tipc_node_active_links(struct tipc_node *n_ptr)
+void tipc_node_delete_links(struct net *net, int bearer_id)
{
- return n_ptr->active_links[0] != NULL;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_node *n;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_link_down(n, bearer_id, true);
+ }
+ rcu_read_unlock();
}
-int tipc_node_is_up(struct tipc_node *n_ptr)
+static void tipc_node_reset_links(struct tipc_node *n)
{
- return tipc_node_active_links(n_ptr);
+ char addr_string[16];
+ int i;
+
+ pr_warn("Resetting all links to %s\n",
+ tipc_addr_string_fill(addr_string, n->addr));
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ tipc_node_link_down(n, i, false);
+ }
}
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+/* tipc_node_fsm_evt - node finite state machine
+ * Determines when contact is allowed with peer node
+ */
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
- n_ptr->links[l_ptr->bearer_id] = l_ptr;
- n_ptr->link_cnt++;
+ int state = n->state;
+
+ switch (state) {
+ case SELF_DOWN_PEER_DOWN:
+ switch (evt) {
+ case SELF_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_COMING;
+ break;
+ case PEER_ESTABL_CONTACT_EVT:
+ state = SELF_COMING_PEER_UP;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_UP_PEER_UP:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ state = NODE_SYNCHING;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ state = NODE_FAILINGOVER;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case NODE_SYNCH_END_EVT:
+ case NODE_FAILOVER_END_EVT:
+ break;
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_DOWN_PEER_LEAVING:
+ switch (evt) {
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case SELF_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_UP_PEER_COMING:
+ switch (evt) {
+ case PEER_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_COMING_PEER_UP:
+ switch (evt) {
+ case SELF_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_LEAVING_PEER_DOWN:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case NODE_FAILINGOVER:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_FAILOVER_END_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case NODE_SYNCHING:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_SYNCH_END_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ state = NODE_FAILINGOVER;
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ default:
+ pr_err("Unknown node fsm state %x\n", state);
+ break;
+ }
+ n->state = state;
+ return;
+
+illegal_evt:
+ pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}
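A typical two-node startup followed by a failover between two bearers, traced through the machine above from one node's perspective:

	SELF_DOWN_PEER_DOWN  --SELF_ESTABL_CONTACT_EVT-->  SELF_UP_PEER_COMING
	SELF_UP_PEER_COMING  --PEER_ESTABL_CONTACT_EVT-->  SELF_UP_PEER_UP
	SELF_UP_PEER_UP      --NODE_FAILOVER_BEGIN_EVT-->  NODE_FAILINGOVER
	NODE_FAILINGOVER     --NODE_FAILOVER_END_EVT---->  SELF_UP_PEER_UP

Any event not listed as a transition for a state either leaves the state unchanged or, for the synch/failover events, is reported as illegal.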
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
{
- int i;
+ int state = n->state;
- for (i = 0; i < MAX_BEARERS; i++) {
- if (l_ptr != n_ptr->links[i])
- continue;
- n_ptr->links[i] = NULL;
- n_ptr->link_cnt--;
+ if (likely(state == SELF_UP_PEER_UP))
+ return true;
+
+ if (state == SELF_LEAVING_PEER_DOWN)
+ return false;
+
+ if (state == SELF_DOWN_PEER_LEAVING) {
+ if (msg_peer_node_is_up(hdr))
+ return false;
}
+
+ return true;
}
static void node_established_contact(struct tipc_node *n_ptr)
{
+ tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
n_ptr->bclink.oos_state = 0;
n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
}
-static void node_lost_contact(struct tipc_node *n_ptr)
+static void node_lost_contact(struct tipc_node *n_ptr,
+ struct sk_buff_head *inputq)
{
char addr_string[16];
struct tipc_sock_conn *conn, *safe;
+ struct tipc_link *l;
struct list_head *conns = &n_ptr->conn_sks;
struct sk_buff *skb;
struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
@@ -396,21 +840,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Abort any ongoing link failover */
for (i = 0; i < MAX_BEARERS; i++) {
- struct tipc_link *l_ptr = n_ptr->links[i];
- if (!l_ptr)
- continue;
- l_ptr->flags &= ~LINK_FAILINGOVER;
- l_ptr->failover_checkpt = 0;
- l_ptr->failover_pkts = 0;
- kfree_skb(l_ptr->failover_skb);
- l_ptr->failover_skb = NULL;
- tipc_link_reset_fragments(l_ptr);
+ l = n_ptr->links[i].link;
+ if (l)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
}
- n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
-
/* Prevent re-contact with node until cleanup is done */
- n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
+ tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
/* Notify publications from this node */
n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
@@ -421,10 +857,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
SHORT_H_SIZE, 0, tn->own_addr,
conn->peer_node, conn->port,
conn->peer_port, TIPC_ERR_NO_NODE);
- if (likely(skb)) {
- skb_queue_tail(n_ptr->inputq, skb);
- n_ptr->action_flags |= TIPC_MSG_EVT;
- }
+ if (likely(skb))
+ skb_queue_tail(inputq, skb);
list_del(&conn->list);
kfree(conn);
}
@@ -453,7 +887,7 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
goto exit;
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (link) {
strncpy(linkname, link->name, len);
err = 0;
@@ -471,27 +905,20 @@ void tipc_node_unlock(struct tipc_node *node)
u32 flags = node->action_flags;
u32 link_id = 0;
struct list_head *publ_list;
- struct sk_buff_head *inputq = node->inputq;
- struct sk_buff_head *namedq;
- if (likely(!flags || (flags == TIPC_MSG_EVT))) {
- node->action_flags = 0;
+ if (likely(!flags)) {
spin_unlock_bh(&node->lock);
- if (flags == TIPC_MSG_EVT)
- tipc_sk_rcv(net, inputq);
return;
}
addr = node->addr;
link_id = node->link_id;
- namedq = node->namedq;
publ_list = &node->publ_list;
- node->action_flags &= ~(TIPC_MSG_EVT |
- TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+ node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
- TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+ TIPC_BCAST_RESET);
spin_unlock_bh(&node->lock);
@@ -512,17 +939,11 @@ void tipc_node_unlock(struct tipc_node *node)
tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
link_id, addr);
- if (flags & TIPC_MSG_EVT)
- tipc_sk_rcv(net, inputq);
-
- if (flags & TIPC_NAMED_MSG_EVT)
- tipc_named_rcv(net, namedq);
-
if (flags & TIPC_BCAST_MSG_EVT)
tipc_bclink_input(net);
if (flags & TIPC_BCAST_RESET)
- tipc_link_reset_all(node);
+ tipc_node_reset_links(node);
}
/* Caller should hold node lock for the passed node */
@@ -559,6 +980,279 @@ msg_full:
return -EMSGSIZE;
}
+static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
+ int *bearer_id,
+ struct tipc_media_addr **maddr)
+{
+ int id = n->active_links[sel & 1];
+
+ if (unlikely(id < 0))
+ return NULL;
+
+ *bearer_id = id;
+ *maddr = &n->links[id].maddr;
+ return n->links[id].link;
+}
+
+/**
+ * tipc_node_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
+ * @list: chain of buffers containing message
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
+ */
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+ u32 dnode, int selector)
+{
+ struct tipc_link *l = NULL;
+ struct tipc_node *n;
+ struct sk_buff_head xmitq;
+ struct tipc_media_addr *maddr;
+ int bearer_id;
+ int rc = -EHOSTUNREACH;
+
+ __skb_queue_head_init(&xmitq);
+ n = tipc_node_find(net, dnode);
+ if (likely(n)) {
+ tipc_node_lock(n);
+ l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
+ if (likely(l))
+ rc = tipc_link_xmit(l, list, &xmitq);
+ tipc_node_unlock(n);
+ if (unlikely(rc == -ENOBUFS))
+ tipc_node_link_down(n, bearer_id, false);
+ tipc_node_put(n);
+ }
+ if (likely(!rc)) {
+ tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
+ return 0;
+ }
+ if (likely(in_own_node(net, dnode))) {
+ tipc_sk_rcv(net, list);
+ return 0;
+ }
+ return rc;
+}
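A caller sketch of the contract documented above (hypothetical, but mirroring how the socket layer drives it):

	rc = tipc_node_xmit(net, &list, dnode, selector);
	if (rc == -ELINKCONG) {
		/* only case where the chain is NOT consumed:
		 * caller keeps ownership, waits, and retries */
	} else if (rc) {
		/* chain consumed per the contract above
		 * (-EHOSTUNREACH etc.); report error to sender */
	}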
+
+/* tipc_node_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+ u32 selector)
+{
+ struct sk_buff_head head;
+ int rc;
+
+ skb_queue_head_init(&head);
+ __skb_queue_tail(&head, skb);
+ rc = tipc_node_xmit(net, &head, dnode, selector);
+ if (rc == -ELINKCONG)
+ kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * tipc_node_check_state - check and if necessary update node state
+ * @skb: TIPC packet
+ * @bearer_id: identity of bearer delivering the packet
+ * Returns true if state is ok, otherwise consumes buffer and returns false
+ */
+static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
+ int bearer_id, struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ int usr = msg_user(hdr);
+ int mtyp = msg_type(hdr);
+ u16 oseqno = msg_seqno(hdr);
+ u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
+ u16 exp_pkts = msg_msgcnt(hdr);
+ u16 rcv_nxt, syncpt, dlv_nxt;
+ int state = n->state;
+ struct tipc_link *l, *pl = NULL;
+ struct tipc_media_addr *maddr;
+ int i, pb_id;
+
+ l = n->links[bearer_id].link;
+ if (!l)
+ return false;
+ rcv_nxt = l->rcv_nxt;
+
+ if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
+ return true;
+
+ /* Find parallel link, if any */
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if ((i != bearer_id) && n->links[i].link) {
+ pl = n->links[i].link;
+ break;
+ }
+ }
+
+ /* Update node accessibility if applicable */
+ if (state == SELF_UP_PEER_COMING) {
+ if (!tipc_link_is_up(l))
+ return true;
+ if (!msg_peer_link_is_up(hdr))
+ return true;
+ tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
+ }
+
+ if (state == SELF_DOWN_PEER_LEAVING) {
+ if (msg_peer_node_is_up(hdr))
+ return false;
+ tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+ }
+
+ /* Ignore duplicate packets */
+ if (less(oseqno, rcv_nxt))
+ return true;
+
+ /* Initiate or update failover mode if applicable */
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
+ syncpt = oseqno + exp_pkts - 1;
+ if (pl && tipc_link_is_up(pl)) {
+ pb_id = pl->bearer_id;
+ __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+ tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
+ }
+ /* If pkts arrive out of order, use lowest calculated syncpt */
+ if (less(syncpt, n->sync_point))
+ n->sync_point = syncpt;
+ }
+
+ /* Open parallel link when tunnel link reaches synch point */
+ if ((n->state == NODE_FAILINGOVER) && !tipc_link_is_failingover(l)) {
+ if (!more(rcv_nxt, n->sync_point))
+ return true;
+ tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
+ if (pl)
+ tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
+ return true;
+ }
+
+ /* Initiate or update synch mode if applicable */
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+ syncpt = iseqno + exp_pkts - 1;
+ if (!tipc_link_is_up(l)) {
+ tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+ __tipc_node_link_up(n, bearer_id, xmitq);
+ }
+ if (n->state == SELF_UP_PEER_UP) {
+ n->sync_point = syncpt;
+ tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
+ }
+ if (less(syncpt, n->sync_point))
+ n->sync_point = syncpt;
+ }
+
+ /* Open tunnel link when parallel link reaches synch point */
+ if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
+ if (pl)
+ dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq));
+ if (!pl || more(dlv_nxt, n->sync_point)) {
+ tipc_link_fsm_evt(l, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+ return true;
+ }
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
+ return true;
+ if (usr == LINK_PROTOCOL)
+ return true;
+ return false;
+ }
+ return true;
+}
+
+/**
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+{
+ struct sk_buff_head xmitq;
+ struct tipc_node *n;
+ struct tipc_msg *hdr = buf_msg(skb);
+ int usr = msg_user(hdr);
+ int bearer_id = b->identity;
+ struct tipc_link_entry *le;
+ int rc = 0;
+
+ __skb_queue_head_init(&xmitq);
+
+ /* Ensure message is well-formed */
+ if (unlikely(!tipc_msg_validate(skb)))
+ goto discard;
+
+ /* Handle arrival of a non-unicast link packet */
+ if (unlikely(msg_non_seq(hdr))) {
+ if (usr == LINK_CONFIG)
+ tipc_disc_rcv(net, skb, b);
+ else
+ tipc_bclink_rcv(net, skb);
+ return;
+ }
+
+ /* Locate neighboring node that sent packet */
+ n = tipc_node_find(net, msg_prevnode(hdr));
+ if (unlikely(!n))
+ goto discard;
+ le = &n->links[bearer_id];
+
+ tipc_node_lock(n);
+
+ /* Is reception permitted at the moment? */
+ if (!tipc_node_filter_pkt(n, hdr))
+ goto unlock;
+
+ if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+ tipc_bclink_sync_state(n, hdr);
+
+ /* Release acked broadcast packets */
+ if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
+ tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
+
+ /* Check and if necessary update node state */
+ if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
+ rc = tipc_link_rcv(le->link, skb, &xmitq);
+ skb = NULL;
+ }
+unlock:
+ tipc_node_unlock(n);
+
+ if (unlikely(rc & TIPC_LINK_UP_EVT))
+ tipc_node_link_up(n, bearer_id, &xmitq);
+
+ if (unlikely(rc & TIPC_LINK_DOWN_EVT))
+ tipc_node_link_down(n, bearer_id, false);
+
+ if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+ tipc_named_rcv(net, &n->bclink.namedq);
+
+ if (!skb_queue_empty(&le->inputq))
+ tipc_sk_rcv(net, &le->inputq);
+
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+ tipc_node_put(n);
+discard:
+ kfree_skb(skb);
+}
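Note that the tail of tipc_rcv() acts only after dropping the node lock; the resulting order is:

	tipc_node_unlock(n);
	/* 1. apply link up/down events returned by tipc_link_rcv() */
	/* 2. drain bclink.namedq -> name table updates             */
	/* 3. drain le->inputq    -> payload data to sockets        */
	/* 4. flush xmitq onto the bearer                           */

so socket and bearer code never runs under the node spinlock.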
+
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5a834cf142c8..344b3e7594fd 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,23 +45,19 @@
/* Out-of-range value for node signature */
#define INVALID_NODE_SIG 0x10000
+#define INVALID_BEARER_ID -1
+
/* Flags used to take different actions according to flag type
- * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
- * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
* TIPC_NOTIFY_NODE_DOWN: notify node is down
* TIPC_NOTIFY_NODE_UP: notify node is up
* TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
*/
enum {
- TIPC_MSG_EVT = 1,
- TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
- TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
TIPC_NOTIFY_NODE_DOWN = (1 << 3),
TIPC_NOTIFY_NODE_UP = (1 << 4),
TIPC_WAKEUP_BCAST_USERS = (1 << 5),
TIPC_NOTIFY_LINK_UP = (1 << 6),
TIPC_NOTIFY_LINK_DOWN = (1 << 7),
- TIPC_NAMED_MSG_EVT = (1 << 8),
TIPC_BCAST_MSG_EVT = (1 << 9),
TIPC_BCAST_RESET = (1 << 10)
};
@@ -85,10 +81,17 @@ struct tipc_node_bclink {
u32 deferred_size;
struct sk_buff_head deferdq;
struct sk_buff *reasm_buf;
- int inputq_map;
+ struct sk_buff_head namedq;
bool recv_permitted;
};
+struct tipc_link_entry {
+ struct tipc_link *link;
+ u32 mtu;
+ struct sk_buff_head inputq;
+ struct tipc_media_addr maddr;
+};
+
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
@@ -98,11 +101,12 @@ struct tipc_node_bclink {
* @hash: links to adjacent nodes in unsorted hash chain
* @inputq: pointer to input queue containing messages for msg event
* @namedq: pointer to name table input queue with name table messages
- * @curr_link: the link holding the node lock, if any
- * @active_links: pointers to active links to node
- * @links: pointers to all links to node
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
* @action_flags: bit mask of different types of node actions
* @bclink: broadcast-related info
+ * @state: connectivity state vs peer node
+ * @sync_point: sequence number where synch/failover is finished
* @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
* @link_cnt: number of links to node
@@ -118,14 +122,13 @@ struct tipc_node {
spinlock_t lock;
struct net *net;
struct hlist_node hash;
- struct sk_buff_head *inputq;
- struct sk_buff_head *namedq;
- struct tipc_link *active_links[2];
- u32 act_mtus[2];
- struct tipc_link *links[MAX_BEARERS];
+ int active_links[2];
+ struct tipc_link_entry links[MAX_BEARERS];
int action_flags;
struct tipc_node_bclink bclink;
struct list_head list;
+ int state;
+ u16 sync_point;
int link_cnt;
u16 working_links;
u16 capabilities;
@@ -133,25 +136,32 @@ struct tipc_node {
u32 link_id;
struct list_head publ_list;
struct list_head conn_sks;
+ unsigned long keepalive_intv;
+ struct timer_list timer;
struct rcu_head rcu;
};
struct tipc_node *tipc_node_find(struct net *net, u32 addr);
void tipc_node_put(struct tipc_node *node);
-struct tipc_node *tipc_node_create(struct net *net, u32 addr);
void tipc_node_stop(struct net *net);
+void tipc_node_check_dest(struct net *net, u32 onode,
+ struct tipc_bearer *bearer,
+ u16 capabilities, u32 signature,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-int tipc_node_active_links(struct tipc_node *n_ptr);
-int tipc_node_is_up(struct tipc_node *n_ptr);
+bool tipc_node_is_up(struct tipc_node *n);
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
char *linkname, size_t len);
void tipc_node_unlock(struct tipc_node *node);
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+ int selector);
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+ u32 selector);
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
static inline void tipc_node_lock(struct tipc_node *node)
@@ -159,26 +169,30 @@ static inline void tipc_node_lock(struct tipc_node *node)
spin_lock_bh(&node->lock);
}
-static inline bool tipc_node_blocked(struct tipc_node *node)
+static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
- return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
- TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
+ int bearer_id = n->active_links[sel & 1];
+
+ if (unlikely(bearer_id == INVALID_BEARER_ID))
+ return NULL;
+
+ return n->links[bearer_id].link;
}
-static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
+static inline unsigned int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
- struct tipc_node *node;
- u32 mtu;
-
- node = tipc_node_find(net, addr);
+ struct tipc_node *n;
+ int bearer_id;
+ unsigned int mtu = MAX_MSG_SIZE;
- if (likely(node)) {
- mtu = node->act_mtus[selector & 1];
- tipc_node_put(node);
- } else {
- mtu = MAX_MSG_SIZE;
- }
+ n = tipc_node_find(net, addr);
+ if (unlikely(!n))
+ return mtu;
+ bearer_id = n->active_links[sel & 1];
+ if (likely(bearer_id != INVALID_BEARER_ID))
+ mtu = n->links[bearer_id].mtu;
+ tipc_node_put(n);
return mtu;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3a7567f690f3..1060d52ff23e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -248,6 +248,22 @@ static void tsk_advance_rx_queue(struct sock *sk)
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
+/* tipc_sk_respond(): send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+ u32 selector;
+ u32 dnode;
+ u32 onode = tipc_own_addr(sock_net(sk));
+
+ if (!tipc_msg_reverse(onode, &skb, err))
+ return;
+
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
/**
* tsk_rej_rx_queue - reject all buffers in socket receive queue
*
@@ -256,13 +272,9 @@ static void tsk_advance_rx_queue(struct sock *sk)
static void tsk_rej_rx_queue(struct sock *sk)
{
struct sk_buff *skb;
- u32 dnode;
- u32 own_node = tsk_own_node(tipc_sk(sk));
- while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
- if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
- tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
- }
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+ tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
@@ -441,9 +453,7 @@ static int tipc_release(struct socket *sock)
tsk->connected = 0;
tipc_node_remove_conn(net, dnode, tsk->portid);
}
- if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
- TIPC_ERR_NO_PORT))
- tipc_link_xmit_skb(net, skb, dnode, 0);
+ tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
}
@@ -456,7 +466,7 @@ static int tipc_release(struct socket *sock)
tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, TIPC_ERR_NO_PORT);
if (skb)
- tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
tipc_node_remove_conn(net, dnode, tsk->portid);
}
@@ -686,21 +696,22 @@ new_mtu:
do {
rc = tipc_bclink_xmit(net, pktchain);
- if (likely(rc >= 0)) {
- rc = dsz;
- break;
+ if (likely(!rc))
+ return dsz;
+
+ if (rc == -ELINKCONG) {
+ tsk->link_cong = 1;
+ rc = tipc_wait_for_sndmsg(sock, &timeo);
+ if (!rc)
+ continue;
}
+ __skb_queue_purge(pktchain);
if (rc == -EMSGSIZE) {
msg->msg_iter = save;
goto new_mtu;
}
- if (rc != -ELINKCONG)
- break;
- tipc_sk(sk)->link_cong = 1;
- rc = tipc_wait_for_sndmsg(sock, &timeo);
- if (rc)
- __skb_queue_purge(pktchain);
- } while (!rc);
+ break;
+ } while (1);
return rc;
}
@@ -763,35 +774,35 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
/**
* tipc_sk_proto_rcv - receive a connection mng protocol message
* @tsk: receiving socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
+ * @skb: pointer to message buffer.
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
{
- struct tipc_msg *msg = buf_msg(*skb);
+ struct sock *sk = &tsk->sk;
+ struct tipc_msg *hdr = buf_msg(skb);
+ int mtyp = msg_type(hdr);
int conn_cong;
- u32 dnode;
- u32 own_node = tsk_own_node(tsk);
+
/* Ignore if connection cannot be validated: */
- if (!tsk_peer_msg(tsk, msg))
+ if (!tsk_peer_msg(tsk, hdr))
goto exit;
tsk->probing_state = TIPC_CONN_OK;
- if (msg_type(msg) == CONN_ACK) {
+ if (mtyp == CONN_PROBE) {
+ msg_set_type(hdr, CONN_PROBE_REPLY);
+ tipc_sk_respond(sk, skb, TIPC_OK);
+ return;
+ } else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
- tsk->sent_unacked -= msg_msgcnt(msg);
+ tsk->sent_unacked -= msg_msgcnt(hdr);
if (conn_cong)
- tsk->sk.sk_write_space(&tsk->sk);
- } else if (msg_type(msg) == CONN_PROBE) {
- if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
- msg_set_type(msg, CONN_PROBE_REPLY);
- return;
- }
+ sk->sk_write_space(sk);
+ } else if (mtyp != CONN_PROBE_REPLY) {
+ pr_warn("Received unknown CONN_PROTO msg\n");
}
- /* Do nothing if msg_type() == CONN_PROBE_REPLY */
exit:
- kfree_skb(*skb);
- *skb = NULL;
+ kfree_skb(skb);
}
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -924,24 +935,25 @@ new_mtu:
do {
skb = skb_peek(pktchain);
TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
- rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
- if (likely(rc >= 0)) {
+ rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+ if (likely(!rc)) {
if (sock->state != SS_READY)
sock->state = SS_CONNECTING;
- rc = dsz;
- break;
+ return dsz;
}
+ if (rc == -ELINKCONG) {
+ tsk->link_cong = 1;
+ rc = tipc_wait_for_sndmsg(sock, &timeo);
+ if (!rc)
+ continue;
+ }
+ __skb_queue_purge(pktchain);
if (rc == -EMSGSIZE) {
m->msg_iter = save;
goto new_mtu;
}
- if (rc != -ELINKCONG)
- break;
- tsk->link_cong = 1;
- rc = tipc_wait_for_sndmsg(sock, &timeo);
- if (rc)
- __skb_queue_purge(pktchain);
- } while (!rc);
+ break;
+ } while (1);
return rc;
}
@@ -1043,15 +1055,16 @@ next:
return rc;
do {
if (likely(!tsk_conn_cong(tsk))) {
- rc = tipc_link_xmit(net, pktchain, dnode, portid);
+ rc = tipc_node_xmit(net, pktchain, dnode, portid);
if (likely(!rc)) {
tsk->sent_unacked++;
sent += send;
if (sent == dsz)
- break;
+ return dsz;
goto next;
}
if (rc == -EMSGSIZE) {
+ __skb_queue_purge(pktchain);
tsk->max_pkt = tipc_node_get_mtu(net, dnode,
portid);
m->msg_iter = save;
@@ -1059,13 +1072,13 @@ next:
}
if (rc != -ELINKCONG)
break;
+
tsk->link_cong = 1;
}
rc = tipc_wait_for_sndpkt(sock, &timeo);
- if (rc)
- __skb_queue_purge(pktchain);
} while (!rc);
+ __skb_queue_purge(pktchain);
return sent ? sent : rc;
}
@@ -1221,7 +1234,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
return;
msg = buf_msg(skb);
msg_set_msgcnt(msg, ack);
- tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+ tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1507,82 +1520,81 @@ static void tipc_data_ready(struct sock *sk)
* @tsk: TIPC socket
* @skb: pointer to message buffer. Set to NULL if buffer is consumed
*
- * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns true if everything ok, false otherwise
*/
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
+static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct socket *sock = sk->sk_socket;
- struct tipc_msg *msg = buf_msg(*skb);
- int retval = -TIPC_ERR_NO_PORT;
+ struct tipc_msg *hdr = buf_msg(skb);
- if (msg_mcast(msg))
- return retval;
+ if (unlikely(msg_mcast(hdr)))
+ return false;
switch ((int)sock->state) {
case SS_CONNECTED:
+
/* Accept only connection-based messages sent by peer */
- if (tsk_peer_msg(tsk, msg)) {
- if (unlikely(msg_errcode(msg))) {
- sock->state = SS_DISCONNECTING;
- tsk->connected = 0;
- /* let timer expire on it's own */
- tipc_node_remove_conn(net, tsk_peer_node(tsk),
- tsk->portid);
- }
- retval = TIPC_OK;
+ if (unlikely(!tsk_peer_msg(tsk, hdr)))
+ return false;
+
+ if (unlikely(msg_errcode(hdr))) {
+ sock->state = SS_DISCONNECTING;
+ tsk->connected = 0;
+ /* Let timer expire on its own */
+ tipc_node_remove_conn(net, tsk_peer_node(tsk),
+ tsk->portid);
}
- break;
+ return true;
+
case SS_CONNECTING:
- /* Accept only ACK or NACK message */
- if (unlikely(!msg_connected(msg)))
- break;
+ /* Accept only ACK or NACK message */
+ if (unlikely(!msg_connected(hdr)))
+ return false;
- if (unlikely(msg_errcode(msg))) {
+ if (unlikely(msg_errcode(hdr))) {
sock->state = SS_DISCONNECTING;
sk->sk_err = ECONNREFUSED;
- retval = TIPC_OK;
- break;
+ return true;
}
- if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
+ if (unlikely(!msg_isdata(hdr))) {
sock->state = SS_DISCONNECTING;
sk->sk_err = EINVAL;
- retval = TIPC_OK;
- break;
+ return true;
}
- tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
- msg_set_importance(&tsk->phdr, msg_importance(msg));
+ tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
+ msg_set_importance(&tsk->phdr, msg_importance(hdr));
sock->state = SS_CONNECTED;
- /* If an incoming message is an 'ACK-', it should be
- * discarded here because it doesn't contain useful
- * data. In addition, we should try to wake up
- * connect() routine if sleeping.
- */
- if (msg_data_sz(msg) == 0) {
- kfree_skb(*skb);
- *skb = NULL;
- if (waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
- }
- retval = TIPC_OK;
- break;
+ /* If 'ACK+' message, add to socket receive queue */
+ if (msg_data_sz(hdr))
+ return true;
+
+ /* If empty 'ACK-' message, wake up sleeping connect() */
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
+
+ /* 'ACK-' message is neither accepted nor rejected: */
+ msg_set_dest_droppable(hdr, 1);
+ return false;
+
case SS_LISTENING:
case SS_UNCONNECTED:
+
/* Accept only SYN message */
- if (!msg_connected(msg) && !(msg_errcode(msg)))
- retval = TIPC_OK;
+ if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+ return true;
break;
case SS_DISCONNECTING:
break;
default:
pr_err("Unknown socket state %u\n", sock->state);
}
- return retval;
+ return false;
}
/**
@@ -1617,61 +1629,70 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
/**
* filter_rcv - validate incoming message
* @sk: socket
- * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @skb: pointer to message.
*
* Enqueues message on receive queue if acceptable; optionally handles
* disconnect indication for a connected socket.
*
* Called with socket lock already taken
*
- * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
+ * Returns true if message was added to socket receive queue, otherwise false
*/
-static int filter_rcv(struct sock *sk, struct sk_buff **skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_msg *msg = buf_msg(*skb);
- unsigned int limit = rcvbuf_limit(sk, *skb);
- int rc = TIPC_OK;
+ struct tipc_msg *hdr = buf_msg(skb);
+ unsigned int limit = rcvbuf_limit(sk, skb);
+ int err = TIPC_OK;
+ int usr = msg_user(hdr);
- if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+ if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
tipc_sk_proto_rcv(tsk, skb);
- return TIPC_OK;
+ return false;
}
- if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
- kfree_skb(*skb);
+ if (unlikely(usr == SOCK_WAKEUP)) {
+ kfree_skb(skb);
tsk->link_cong = 0;
sk->sk_write_space(sk);
- *skb = NULL;
- return TIPC_OK;
+ return false;
}
- /* Reject message if it is wrong sort of message for socket */
- if (msg_type(msg) > TIPC_DIRECT_MSG)
- return -TIPC_ERR_NO_PORT;
+ /* Drop if illegal message type */
+ if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
+ kfree_skb(skb);
+ return false;
+ }
- if (sock->state == SS_READY) {
- if (msg_connected(msg))
- return -TIPC_ERR_NO_PORT;
- } else {
- rc = filter_connect(tsk, skb);
- if (rc != TIPC_OK || !*skb)
- return rc;
+ /* Reject if wrong message type for current socket state */
+ if (unlikely(sock->state == SS_READY)) {
+ if (msg_connected(hdr)) {
+ err = TIPC_ERR_NO_PORT;
+ goto reject;
+ }
+ } else if (unlikely(!filter_connect(tsk, skb))) {
+ err = TIPC_ERR_NO_PORT;
+ goto reject;
}
/* Reject message if there isn't room to queue it */
- if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
- return -TIPC_ERR_OVERLOAD;
+ if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
+ err = TIPC_ERR_OVERLOAD;
+ goto reject;
+ }
/* Enqueue message */
- TIPC_SKB_CB(*skb)->handle = NULL;
- __skb_queue_tail(&sk->sk_receive_queue, *skb);
- skb_set_owner_r(*skb, sk);
+ TIPC_SKB_CB(skb)->handle = NULL;
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ skb_set_owner_r(skb, sk);
sk->sk_data_ready(sk);
- *skb = NULL;
- return TIPC_OK;
+ return true;
+
+reject:
+ tipc_sk_respond(sk, skb, err);
+ return false;
}
/**
@@ -1685,22 +1706,10 @@ static int filter_rcv(struct sock *sk, struct sk_buff **skb)
*/
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
- int err;
- atomic_t *dcnt;
- u32 dnode;
- struct tipc_sock *tsk = tipc_sk(sk);
- struct net *net = sock_net(sk);
- uint truesize = skb->truesize;
+ unsigned int truesize = skb->truesize;
- err = filter_rcv(sk, &skb);
- if (likely(!skb)) {
- dcnt = &tsk->dupl_rcvcnt;
- if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
- atomic_add(truesize, dcnt);
- return 0;
- }
- if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
- tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ if (likely(filter_rcv(sk, skb)))
+ atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
return 0;
}
@@ -1710,45 +1719,43 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* @inputq: list of incoming buffers with potentially different destinations
* @sk: socket where the buffers should be enqueued
* @dport: port number for the socket
- * @_skb: returned buffer to be forwarded or rejected, if applicable
*
* Caller must hold socket lock
- *
- * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
- * or -TIPC_ERR_NO_PORT
*/
-static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
- u32 dport, struct sk_buff **_skb)
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+ u32 dport)
{
unsigned int lim;
atomic_t *dcnt;
- int err;
struct sk_buff *skb;
unsigned long time_limit = jiffies + 2;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
- return TIPC_OK;
+ return;
+
skb = tipc_skb_dequeue(inputq, dport);
if (unlikely(!skb))
- return TIPC_OK;
+ return;
+
+ /* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- err = filter_rcv(sk, &skb);
- if (likely(!skb))
- continue;
- *_skb = skb;
- return err;
+ filter_rcv(sk, skb);
+ continue;
}
+
+ /* Try backlog, compensating for double-counted bytes */
dcnt = &tipc_sk(sk)->dupl_rcvcnt;
if (sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
if (likely(!sk_add_backlog(sk, skb, lim)))
continue;
- *_skb = skb;
- return -TIPC_ERR_OVERLOAD;
+
+ /* Overload => reject message back to sender */
+ tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+ break;
}
- return TIPC_OK;
}
/**
@@ -1756,49 +1763,46 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
* @inputq: buffer list containing the buffers
* Consumes all buffers in list until inputq is empty
* Note: may be called in multiple threads referring to the same queue
- * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
- * Only node local calls check the return value, sending single-buffer queues
*/
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
u32 dnode, dport = 0;
int err;
- struct sk_buff *skb;
struct tipc_sock *tsk;
- struct tipc_net *tn;
struct sock *sk;
+ struct sk_buff *skb;
while (skb_queue_len(inputq)) {
- err = -TIPC_ERR_NO_PORT;
- skb = NULL;
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
+
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
- err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+ tipc_sk_enqueue(inputq, sk, dport);
spin_unlock_bh(&sk->sk_lock.slock);
- dport = 0;
}
sock_put(sk);
- } else {
- skb = tipc_skb_dequeue(inputq, dport);
- }
- if (likely(!skb))
continue;
- if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
- goto xmit;
- if (!err) {
- dnode = msg_destnode(buf_msg(skb));
- goto xmit;
}
- tn = net_generic(net, tipc_net_id);
- if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+
+ /* No destination socket => dequeue skb if still there */
+ skb = tipc_skb_dequeue(inputq, dport);
+ if (!skb)
+ return;
+
+ /* Try secondary lookup if unresolved named message */
+ err = TIPC_ERR_NO_PORT;
+ if (tipc_msg_lookup_dest(net, skb, &err))
+ goto xmit;
+
+ /* Prepare for message rejection */
+ if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
continue;
xmit:
- tipc_link_xmit_skb(net, skb, dnode, dport);
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
}
- return err ? -EHOSTUNREACH : 0;
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2067,7 +2071,10 @@ static int tipc_shutdown(struct socket *sock, int how)
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct sk_buff *skb;
- u32 dnode;
+ u32 dnode = tsk_peer_node(tsk);
+ u32 dport = tsk_peer_port(tsk);
+ u32 onode = tipc_own_addr(net);
+ u32 oport = tsk->portid;
int res;
if (how != SHUT_RDWR)
@@ -2080,6 +2087,8 @@ static int tipc_shutdown(struct socket *sock, int how)
case SS_CONNECTED:
restart:
+ dnode = tsk_peer_node(tsk);
+
/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
skb = __skb_dequeue(&sk->sk_receive_queue);
if (skb) {
@@ -2087,19 +2096,13 @@ restart:
kfree_skb(skb);
goto restart;
}
- if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
- TIPC_CONN_SHUTDOWN))
- tipc_link_xmit_skb(net, skb, dnode,
- tsk->portid);
+ tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
} else {
- dnode = tsk_peer_node(tsk);
-
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE,
- 0, dnode, tsk_own_node(tsk),
- tsk_peer_port(tsk),
- tsk->portid, TIPC_CONN_SHUTDOWN);
- tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ 0, dnode, onode, dport, oport,
+ TIPC_CONN_SHUTDOWN);
+ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
}
tsk->connected = 0;
sock->state = SS_DISCONNECTING;
@@ -2161,7 +2164,7 @@ static void tipc_sk_timeout(unsigned long data)
}
bh_unlock_sock(sk);
if (skb)
- tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
+ tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
sock_put(sk);
}
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index bf6551389522..4241f22069dc 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,7 +44,7 @@
SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
int tipc_socket_init(void);
void tipc_socket_stop(void);
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct sk_buff_head *inputq);
void tipc_sk_reinit(struct net *net);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 66deebc66aa1..c170d3138953 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -194,7 +194,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
.saddr = src->ipv6,
.flowi6_proto = IPPROTO_UDP
};
- err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+ err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+ &fl6);
if (err)
goto tx_error;
ttl = ip6_dst_hoplimit(ndst);
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 7d730543f243..477364ad750e 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -135,8 +135,7 @@ EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
* @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
* %WIMAX_RF_OFF radio off.
*
- * Reports changes in the software RF switch state to the the WiMAX
- * stack.
+ * Reports changes in the software RF switch state to the WiMAX stack.
*
* The main use is during initialization, so the driver can query the
* device for its current software radio kill switch state and feed it
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bd16c6c7e1e7..0cebf1fc37a2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2048,7 +2048,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
xfrm_audit_policy_delete(xp, 1, true);
} else {
// reset the timers here?
- WARN(1, "Dont know what to do with soft policy expire\n");
+ WARN(1, "Don't know what to do with soft policy expire\n");
}
km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4450fed91ab4..63e7d50e6a4f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -12,6 +12,7 @@ hostprogs-y += tracex2
hostprogs-y += tracex3
hostprogs-y += tracex4
hostprogs-y += tracex5
+hostprogs-y += tracex6
hostprogs-y += lathist
test_verifier-objs := test_verifier.o libbpf.o
@@ -25,6 +26,7 @@ tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
+tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
lathist-objs := bpf_load.o libbpf.o lathist_user.o
# Tell kbuild to always build the programs
@@ -37,6 +39,7 @@ always += tracex2_kern.o
always += tracex3_kern.o
always += tracex4_kern.o
always += tracex5_kern.o
+always += tracex6_kern.o
always += tcbpf1_kern.o
always += lathist_kern.o
@@ -51,6 +54,7 @@ HOSTLOADLIBES_tracex2 += -lelf
HOSTLOADLIBES_tracex3 += -lelf
HOSTLOADLIBES_tracex4 += -lelf -lrt
HOSTLOADLIBES_tracex5 += -lelf
+HOSTLOADLIBES_tracex6 += -lelf
HOSTLOADLIBES_lathist += -lelf
# point this to your LLVM backend with bpf support
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index bdf1c1607b80..3a44d3a272af 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -31,6 +31,8 @@ static unsigned long long (*bpf_get_current_uid_gid)(void) =
(void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
(void *) BPF_FUNC_get_current_comm;
+static int (*bpf_perf_event_read)(void *map, int index) =
+ (void *) BPF_FUNC_perf_event_read;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -60,4 +62,29 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag
static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
(void *) BPF_FUNC_l4_csum_replace;
+#if defined(__x86_64__)
+
+#define PT_REGS_PARM1(x) ((x)->di)
+#define PT_REGS_PARM2(x) ((x)->si)
+#define PT_REGS_PARM3(x) ((x)->dx)
+#define PT_REGS_PARM4(x) ((x)->cx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->sp)
+#define PT_REGS_FP(x) ((x)->bp)
+#define PT_REGS_RC(x) ((x)->ax)
+#define PT_REGS_SP(x) ((x)->sp)
+
+#elif defined(__s390x__)
+
+#define PT_REGS_PARM1(x) ((x)->gprs[2])
+#define PT_REGS_PARM2(x) ((x)->gprs[3])
+#define PT_REGS_PARM3(x) ((x)->gprs[4])
+#define PT_REGS_PARM4(x) ((x)->gprs[5])
+#define PT_REGS_PARM5(x) ((x)->gprs[6])
+#define PT_REGS_RET(x) ((x)->gprs[14])
+#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->gprs[2])
+#define PT_REGS_SP(x) ((x)->gprs[15])
+
+#endif
#endif
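With these accessors the sample programs below become arch-neutral. A minimal kprobe sketch using them (hypothetical probe; my_map stands in for a hash map definition like those in the tracex samples):

	SEC("kprobe/kmem_cache_free")
	int bpf_prog(struct pt_regs *ctx)
	{
		/* second argument of the probed function, independent
		 * of the architecture's calling convention */
		long ptr = PT_REGS_PARM2(ctx);

		bpf_map_delete_elem(&my_map, &ptr);
		return 0;
	}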
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 693605997abc..ee0f110c9c54 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -822,6 +822,65 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
+ {
+ "PTR_TO_STACK store/load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "PTR_TO_STACK store/load - bad alignment on off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned access off -6 size 8",
+ },
+ {
+ "PTR_TO_STACK store/load - bad alignment on reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned access off -2 size 8",
+ },
+ {
+ "PTR_TO_STACK store/load - out of bounds low",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=-79992 size=8",
+ },
+ {
+ "PTR_TO_STACK store/load - out of bounds high",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=0 size=8",
+ },
};
static int probe_filter_length(struct bpf_insn *fp)
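The expected error strings follow directly from the pointer arithmetic: R1 = fp - 8 plus an instruction offset of 2 yields an effective stack offset of -6, which is not naturally aligned for an 8-byte BPF_DW access, and fp - 8 plus an offset of 8 lands at offset 0, above the stack. A hypothetical extra entry in the same style (illustration only, not in the patch) that the verifier accepts, since a 4-byte BPF_W access at fp - 4 is aligned and in bounds:

    {
    	"PTR_TO_STACK store/load - aligned word",
    	.insns = {
    		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
    		BPF_ST_MEM(BPF_W, BPF_REG_1, 4, 0xcafe),
    		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 4),
    		BPF_EXIT_INSN(),
    	},
    	.result = ACCEPT,
    },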
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
index 31620463701a..3f450a8fa1f3 100644
--- a/samples/bpf/tracex1_kern.c
+++ b/samples/bpf/tracex1_kern.c
@@ -29,7 +29,7 @@ int bpf_prog1(struct pt_regs *ctx)
int len;
/* non-portable! works for the given kernel only */
- skb = (struct sk_buff *) ctx->di;
+ skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
dev = _(skb->dev);
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index dc50f4f2943f..b32367cfbff4 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx)
long init_val = 1;
long *value;
- /* x64 specific: read ip of kfree_skb caller.
+ /* x64/s390x specific: read ip of kfree_skb caller.
* non-portable version of __builtin_return_address(0)
*/
- bpf_probe_read(&loc, sizeof(loc), (void *)ctx->sp);
+ bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
value = bpf_map_lookup_elem(&my_map, &loc);
if (value)
@@ -79,7 +79,7 @@ struct bpf_map_def SEC("maps") my_hist_map = {
SEC("kprobe/sys_write")
int bpf_prog3(struct pt_regs *ctx)
{
- long write_size = ctx->dx; /* arg3 */
+ long write_size = PT_REGS_PARM3(ctx);
long init_val = 1;
long *value;
struct hist_key key = {};
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
index 255ff2792366..bf337fbb0947 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3_kern.c
@@ -23,7 +23,7 @@ struct bpf_map_def SEC("maps") my_map = {
SEC("kprobe/blk_mq_start_request")
int bpf_prog1(struct pt_regs *ctx)
{
- long rq = ctx->di;
+ long rq = PT_REGS_PARM1(ctx);
u64 val = bpf_ktime_get_ns();
bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
@@ -51,7 +51,7 @@ struct bpf_map_def SEC("maps") lat_map = {
SEC("kprobe/blk_update_request")
int bpf_prog2(struct pt_regs *ctx)
{
- long rq = ctx->di;
+ long rq = PT_REGS_PARM1(ctx);
u64 *value, l, base;
u32 index;
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
index 126b80512228..ac4671420cf1 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4_kern.c
@@ -27,7 +27,7 @@ struct bpf_map_def SEC("maps") my_map = {
SEC("kprobe/kmem_cache_free")
int bpf_prog1(struct pt_regs *ctx)
{
- long ptr = ctx->si;
+ long ptr = PT_REGS_PARM2(ctx);
bpf_map_delete_elem(&my_map, &ptr);
return 0;
@@ -36,11 +36,11 @@ int bpf_prog1(struct pt_regs *ctx)
SEC("kretprobe/kmem_cache_alloc_node")
int bpf_prog2(struct pt_regs *ctx)
{
- long ptr = ctx->ax;
+ long ptr = PT_REGS_RC(ctx);
long ip = 0;
/* get ip address of kmem_cache_alloc_node() caller */
- bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip)));
+ bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
struct pair v = {
.val = bpf_ktime_get_ns(),
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index b71fe07a7a7a..b3f4295bf288 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -24,7 +24,7 @@ int bpf_prog1(struct pt_regs *ctx)
{
struct seccomp_data sd = {};
- bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+ bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
/* dispatch into next BPF program depending on syscall number */
bpf_tail_call(ctx, &progs, sd.nr);
@@ -42,7 +42,7 @@ PROG(__NR_write)(struct pt_regs *ctx)
{
struct seccomp_data sd = {};
- bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+ bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
if (sd.args[2] == 512) {
char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
@@ -55,7 +55,7 @@ PROG(__NR_read)(struct pt_regs *ctx)
{
struct seccomp_data sd = {};
- bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+ bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
if (sd.args[2] > 128 && sd.args[2] <= 1024) {
char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
new file mode 100644
index 000000000000..be479c4af9e2
--- /dev/null
+++ b/samples/bpf/tracex6_kern.c
@@ -0,0 +1,27 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u32),
+ .max_entries = 32,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ u64 count;
+ u32 key = bpf_get_smp_processor_id();
+ char fmt[] = "CPU-%d %llu\n";
+
+ count = bpf_perf_event_read(&my_map, key); /* counter value for this CPU */
+ bpf_trace_printk(fmt, sizeof(fmt), key, count);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
new file mode 100644
index 000000000000..8ea4976cfcf1
--- /dev/null
+++ b/samples/bpf/tracex6_user.c
@@ -0,0 +1,75 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define SAMPLE_PERIOD 0x7fffffffffffffffULL /* max period: count only, never sample */
+
+static void test_bpf_perf_event(void)
+{
+ int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ int *pmu_fd = malloc(nr_cpus * sizeof(int));
+ int status, i;
+
+ struct perf_event_attr attr_insn_pmu = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_HARDWARE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config = PERF_COUNT_HW_CPU_CYCLES, /* = 0: count CPU cycles */
+ };
+
+ if (!pmu_fd)
+ return;
+
+ for (i = 0; i < nr_cpus; i++) {
+ pmu_fd[i] = perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
+ if (pmu_fd[i] < 0) {
+ printf("event syscall failed\n");
+ goto exit;
+ }
+
+ bpf_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
+ ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+ }
+
+ status = system("ls > /dev/null");
+ if (status)
+ goto exit;
+ status = system("sleep 2");
+ if (status)
+ goto exit;
+
+exit:
+ while (--i >= 0) /* close only the fds we actually opened */
+ close(pmu_fd[i]);
+ close(map_fd[0]);
+ free(pmu_fd);
+}
+
+int main(int argc, char **argv)
+{
+ char filename[256];
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ test_bpf_perf_event();
+ read_trace_pipe();
+
+ return 0;
+}
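glibc provides no perf_event_open() wrapper, so the call above resolves to the helper in samples/bpf's libbpf. A minimal sketch of such a wrapper, assuming only standard Linux headers:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* raw syscall; the C library exposes no perf_event_open() of its own */
    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
    			   int cpu, int group_fd, unsigned long flags)
    {
    	return syscall(__NR_perf_event_open, attr, pid, cpu,
    		       group_fd, flags);
    }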
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index 618c2bcd4eab..2cd3d4c99738 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -22,9 +22,14 @@
#include <string.h>
#include <bfd.h>
#include <dis-asm.h>
+#include <regex.h>
+#include <fcntl.h>
#include <sys/klog.h>
#include <sys/types.h>
-#include <regex.h>
+#include <sys/stat.h>
+
+#define CMD_ACTION_SIZE_BUFFER 10
+#define CMD_ACTION_READ_ALL 3
static void get_exec_path(char *tpath, size_t size)
{
@@ -87,20 +92,66 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
bfd_close(bfdf);
}
-static char *get_klog_buff(int *klen)
+static char *get_klog_buff(unsigned int *klen)
{
- int ret, len = klogctl(10, NULL, 0);
- char *buff = malloc(len);
+ int ret, len;
+ char *buff;
+
+ len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+ buff = malloc(len);
+ if (!buff)
+ return NULL;
+
+ ret = klogctl(CMD_ACTION_READ_ALL, buff, len);
+ if (ret < 0) {
+ free(buff);
+ return NULL;
+ }
- assert(buff && klen);
- ret = klogctl(3, buff, len);
- assert(ret >= 0);
*klen = ret;
+ return buff;
+}
+static char *get_flog_buff(const char *file, unsigned int *klen)
+{
+ int fd, ret, len;
+ struct stat fi;
+ char *buff;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ return NULL;
+
+ ret = fstat(fd, &fi);
+ if (ret < 0 || !S_ISREG(fi.st_mode))
+ goto out;
+
+ len = fi.st_size + 1;
+ buff = malloc(len);
+ if (!buff)
+ goto out;
+
+ memset(buff, 0, len);
+ ret = read(fd, buff, len - 1);
+ if (ret <= 0)
+ goto out_free;
+
+ close(fd);
+ *klen = ret;
return buff;
+out_free:
+ free(buff);
+out:
+ close(fd);
+ return NULL;
+}
+
+static char *get_log_buff(const char *file, unsigned int *klen)
+{
+ return file ? get_flog_buff(file, klen) : get_klog_buff(klen);
}
-static void put_klog_buff(char *buff)
+static void put_log_buff(char *buff)
{
free(buff);
}
@@ -138,8 +189,10 @@ static int get_last_jit_image(char *haystack, size_t hlen,
ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
&flen, &proglen, &pass, &base);
- if (ret != 4)
+ if (ret != 4) {
+ regfree(&regex);
return 0;
+ }
tmp = ptr = haystack + off;
while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) {
@@ -169,31 +222,50 @@
return ulen;
}
+static void usage(void)
+{
+ printf("Usage: bpf_jit_disasm [...]\n");
+ printf(" -o Also display related opcodes (default: off).\n");
+ printf(" -f <file> Read last image dump from file or stdin (default: klog).\n");
+ printf(" -h Display this help.\n");
+}
+
int main(int argc, char **argv)
{
- int len, klen, opcodes = 0;
- char *kbuff;
+ unsigned int len, klen, opcodes = 0;
+ int opt;
static uint8_t image[32768];
+ char *kbuff, *file = NULL;
- if (argc > 1) {
- if (!strncmp("-o", argv[argc - 1], 2)) {
+ while ((opt = getopt(argc, argv, "of:")) != -1) {
+ switch (opt) {
+ case 'o':
opcodes = 1;
- } else {
- printf("usage: bpf_jit_disasm [-o: show opcodes]\n");
- exit(0);
+ break;
+ case 'f':
+ file = optarg;
+ break;
+ default:
+ usage();
+ return -1;
}
}
bfd_init();
memset(image, 0, sizeof(image));
- kbuff = get_klog_buff(&klen);
+ kbuff = get_log_buff(file, &klen);
+ if (!kbuff) {
+ fprintf(stderr, "Could not retrieve log buffer!\n");
+ return -1;
+ }
len = get_last_jit_image(kbuff, klen, image, sizeof(image));
if (len > 0)
get_asm_insns(image, len, opcodes);
+ else
+ fprintf(stderr, "No JIT image found!\n");
- put_klog_buff(kbuff);
-
+ put_log_buff(kbuff);
return 0;
}
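With the getopt rework, the tool can now disassemble a previously captured dump rather than only the live kernel log. A typical session, assuming bpf_jit_enable has been set to 2 so the JIT prints its images to the kernel log, might look like:

    echo 2 > /proc/sys/net/core/bpf_jit_enable
    dmesg > /tmp/jit.log
    bpf_jit_disasm -o -f /tmp/jit.log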