Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 3
-rw-r--r--  drivers/net/amt.c | 8
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 3
-rw-r--r--  drivers/net/bonding/bond_alb.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 31
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 15
-rw-r--r--  drivers/net/bonding/bond_options.c | 33
-rw-r--r--  drivers/net/caif/caif_virtio.c | 10
-rw-r--r--  drivers/net/can/Kconfig | 111
-rw-r--r--  drivers/net/can/Makefile | 3
-rw-r--r--  drivers/net/can/can327.c | 1137
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_base.c | 2
-rw-r--r--  drivers/net/can/dev/Makefile | 17
-rw-r--r--  drivers/net/can/dev/bittiming.c | 197
-rw-r--r--  drivers/net/can/dev/calc_bittiming.c | 202
-rw-r--r--  drivers/net/can/dev/dev.c | 9
-rw-r--r--  drivers/net/can/dev/netlink.c | 9
-rw-r--r--  drivers/net/can/dev/skb.c | 72
-rw-r--r--  drivers/net/can/grcan.c | 1
-rw-r--r--  drivers/net/can/m_can/Kconfig | 1
-rw-r--r--  drivers/net/can/m_can/m_can.c | 12
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 5
-rw-r--r--  drivers/net/can/slcan/Makefile | 7
-rw-r--r--  drivers/net/can/slcan/slcan-core.c (renamed from drivers/net/can/slcan.c) | 504
-rw-r--r--  drivers/net/can/slcan/slcan-ethtool.c | 65
-rw-r--r--  drivers/net/can/slcan/slcan.h | 18
-rw-r--r--  drivers/net/can/spi/mcp251xfd/Kconfig | 1
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 6
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 22
-rw-r--r--  drivers/net/can/usb/Kconfig | 15
-rw-r--r--  drivers/net/can/usb/Makefile | 2
-rw-r--r--  drivers/net/can/usb/esd_usb.c (renamed from drivers/net/can/usb/esd_usb2.c) | 250
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 5
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.h | 6
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 23
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 25
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 285
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 4
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 119
-rw-r--r--  drivers/net/can/xilinx_can.c | 76
-rw-r--r--  drivers/net/dsa/Kconfig | 9
-rw-r--r--  drivers/net/dsa/Makefile | 1
-rw-r--r--  drivers/net/dsa/b53/b53_spi.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 5
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_ptp.c | 1
-rw-r--r--  drivers/net/dsa/microchip/Kconfig | 42
-rw-r--r--  drivers/net/dsa/microchip/Makefile | 11
-rw-r--r--  drivers/net/dsa/microchip/ksz8.h | 104
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 583
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 29
-rw-r--r--  drivers/net/dsa/microchip/ksz8863_smi.c | 19
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 335
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.h | 60
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 6
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 22
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 150
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 818
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 197
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c (renamed from drivers/net/dsa/microchip/ksz8795_spi.c) | 125
-rw-r--r--  drivers/net/dsa/microchip/lan937x.h | 27
-rw-r--r--  drivers/net/dsa/microchip/lan937x_main.c | 484
-rw-r--r--  drivers/net/dsa/microchip/lan937x_reg.h | 180
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 39
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 36
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 2
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig | 1
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 9
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 1
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 246
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 17
-rw-r--r--  drivers/net/dsa/qca8k.c | 22
-rw-r--r--  drivers/net/dsa/qca8k.h | 2
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.c | 1064
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.h | 259
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h | 4
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 10
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 11
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 10
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 108
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 27
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_hci.h | 40
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_main.c | 3
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_tx.c | 25
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_dev.h | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 68
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_param.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 92
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 77
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 13
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 17
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 386
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 71
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan_mode.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_param.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.c | 47
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flow.c | 52
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flow.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flower.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 529
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 474
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c | 198
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 84
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c | 433
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 1072
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c | 346
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 175
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 843
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 4
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 8
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 1
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c | 6
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/Kconfig | 24
-rw-r--r--  drivers/net/ethernet/neterion/Makefile | 1
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 10
-rw-r--r--  drivers/net/ethernet/neterion/vxge/Makefile | 8
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 5099
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.h | 2086
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 1154
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.h | 48
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 4808
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.h | 516
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-reg.h | 4636
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 2428
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-traffic.h | 2290
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-version.h | 49
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfd3/dp.c | 68
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfd3/rings.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfd3/xsk.c | 9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfdk/dp.c | 43
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfdk/rings.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 144
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 18
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_dp.c | 24
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_dp.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 201
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 30
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 10
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef100.c | 69
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c | 130
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c | 422
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.h | 10
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 73
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 77
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.h | 16
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c | 50
-rw-r--r--  drivers/net/ethernet/sfc/falcon/bitfield.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/farch.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 15
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 10
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 69
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/siena/farch.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/siena/mcdi_pcol.h | 10
-rw-r--r--  drivers/net/ethernet/sfc/sriov.c | 10
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/sun/cassini.h | 2
-rw-r--r--  drivers/net/ethernet/sun/ldmvsw.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 2
-rw-r--r--  drivers/net/ethernet/sunplus/spl2sw_driver.c | 2
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
-rw-r--r--  drivers/net/ethernet/wangxun/Kconfig | 32
-rw-r--r--  drivers/net/ethernet/wangxun/Makefile | 6
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/Makefile | 9
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe.h | 24
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 165
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 57
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 15
-rw-r--r--  drivers/net/hamradio/6pack.c | 9
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 4
-rw-r--r--  drivers/net/pcs/Kconfig | 12
-rw-r--r--  drivers/net/pcs/Makefile | 1
-rw-r--r--  drivers/net/pcs/pcs-lynx.c | 80
-rw-r--r--  drivers/net/pcs/pcs-rzn1-miic.c | 531
-rw-r--r--  drivers/net/phy/Kconfig | 7
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/aquantia_main.c | 35
-rw-r--r--  drivers/net/phy/at803x.c | 6
-rw-r--r--  drivers/net/phy/ax88796b.c | 6
-rw-r--r--  drivers/net/phy/bcm-phy-lib.h | 19
-rw-r--r--  drivers/net/phy/bcm-phy-ptp.c | 944
-rw-r--r--  drivers/net/phy/broadcom.c | 48
-rw-r--r--  drivers/net/phy/dp83822.c | 4
-rw-r--r--  drivers/net/phy/dp83td510.c | 49
-rw-r--r--  drivers/net/phy/micrel.c | 73
-rw-r--r--  drivers/net/phy/mxl-gpy.c | 106
-rw-r--r--  drivers/net/phy/nxp-tja11xx.c | 11
-rw-r--r--  drivers/net/phy/phy.c | 23
-rw-r--r--  drivers/net/phy/phy_device.c | 23
-rw-r--r--  drivers/net/phy/phylink.c | 74
-rw-r--r--  drivers/net/phy/sfp.c | 12
-rw-r--r--  drivers/net/phy/smsc.c | 19
-rw-r--r--  drivers/net/tun.c | 15
-rw-r--r--  drivers/net/usb/asix.h | 6
-rw-r--r--  drivers/net/usb/asix_common.c | 41
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 101
-rw-r--r--  drivers/net/usb/catc.c | 4
-rw-r--r--  drivers/net/usb/cdc_eem.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 202
-rw-r--r--  drivers/net/usb/usbnet.c | 32
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 33
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 4
-rw-r--r--  drivers/net/xen-netback/netback.c | 4
-rw-r--r--  drivers/net/xen-netfront.c | 56
362 files changed, 15032 insertions, 27465 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b2a4f998c180..94c889802566 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -94,6 +94,7 @@ config WIREGUARD
select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
select CRYPTO_POLY1305_MIPS if MIPS
+ select CRYPTO_CHACHA_S390 if S390
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
@@ -499,6 +500,8 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/can/Kconfig"
+
source "drivers/net/mctp/Kconfig"
source "drivers/net/mdio/Kconfig"
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index be2719a3ba70..732f4c0daa73 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -1373,11 +1373,11 @@ static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
int i;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
@@ -1458,11 +1458,11 @@ static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
int i, j;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a86b1f71762e..d7fb33c078e8 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2228,7 +2228,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
- ad_clear_agg(temp_aggregator);
+ if (temp_aggregator->num_of_ports == 0)
+ ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 303c8d32d451..007d43e46dcb 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1302,12 +1302,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
return res;
if (rlb_enabled) {
- bond->alb_info.rlb_enabled = 1;
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
+ bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3d427183ec8e..e75acb14d066 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1026,12 +1026,38 @@ out:
}
+/**
+ * bond_choose_primary_or_current - select the primary or high priority slave
+ * @bond: our bonding struct
+ *
+ * - Check if there is a primary link. If the primary link was set and is up,
+ * go on and do link reselection.
+ *
+ * - If primary link is not set or down, find the highest priority link.
+ * If the highest priority link is not current slave, set it as primary
+ * link and do link reselection.
+ */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
+ struct slave *slave, *hprio = NULL;
+ struct list_head *iter;
if (!prim || prim->link != BOND_LINK_UP) {
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP) {
+ hprio = hprio ?: slave;
+ if (slave->prio > hprio->prio)
+ hprio = slave;
+ }
+ }
+
+ if (hprio && hprio != curr) {
+ prim = hprio;
+ goto link_reselect;
+ }
+
if (!curr || curr->link != BOND_LINK_UP)
return NULL;
return curr;
@@ -1042,6 +1068,7 @@ static struct slave *bond_choose_primary_or_current(struct bonding *bond)
return prim;
}
+link_reselect:
if (!curr || curr->link != BOND_LINK_UP)
return prim;
@@ -3684,9 +3711,11 @@ re_arm:
if (!rtnl_trylock())
return;
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 5a6f44455b95..c2d080fc4fc4 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -27,6 +27,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
+ nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
0;
}
@@ -53,6 +54,9 @@ static int bond_fill_slave_info(struct sk_buff *skb,
if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
goto nla_put_failure;
+ if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
+ goto nla_put_failure;
+
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
const struct port *ad_port;
@@ -117,6 +121,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
+ [IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -157,6 +162,16 @@ static int bond_slave_changelink(struct net_device *bond_dev,
return err;
}
+ if (data[IFLA_BOND_SLAVE_PRIO]) {
+ int prio = nla_get_s32(data[IFLA_BOND_SLAVE_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, prio);
+ err = __bond_opt_set(bond, BOND_OPT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_PRIO], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
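
The new IFLA_BOND_SLAVE_PRIO attribute travels in the standard rtnetlink
slave-data nest (IFLA_LINKINFO -> IFLA_INFO_SLAVE_DATA), which is exactly
what bond_slave_changelink() above unpacks. The following userspace sketch
of that message layout is illustrative only: the slave name "eth1", the
priority value and the hand-rolled attribute helpers are assumptions, and
in practice an iproute2 release with bond-slave prio support issues the
same request via "ip link set dev eth1 type bond_slave prio 10".

    /* Hypothetical sketch: set IFLA_BOND_SLAVE_PRIO on an enslaved
     * device via RTM_NEWLINK. ACK parsing is omitted for brevity.
     */
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/if_link.h>

    static struct rtattr *nest_start(struct nlmsghdr *n, unsigned short type)
    {
        struct rtattr *a = (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));

        a->rta_type = type;
        a->rta_len = RTA_LENGTH(0);
        n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_LENGTH(0);
        return a;
    }

    static void put_attr(struct nlmsghdr *n, unsigned short type,
                         const void *data, unsigned short len)
    {
        struct rtattr *a = (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));

        a->rta_type = type;
        a->rta_len = RTA_LENGTH(len);
        memcpy(RTA_DATA(a), data, len);
        n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(a->rta_len);
    }

    static void nest_end(struct nlmsghdr *n, struct rtattr *nest)
    {
        nest->rta_len = (unsigned short)((char *)n + NLMSG_ALIGN(n->nlmsg_len) -
                                         (char *)nest);
    }

    int main(void)
    {
        struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
        unsigned char buf[512] __attribute__((aligned(NLMSG_ALIGNTO))) = { 0 };
        struct nlmsghdr *n = (struct nlmsghdr *)buf;
        struct rtattr *linkinfo, *slave_data;
        struct ifinfomsg *ifi;
        int prio = 10;      /* lands in slave->prio via bond_option_prio_set() */
        int fd;

        n->nlmsg_len = NLMSG_LENGTH(sizeof(*ifi));
        n->nlmsg_type = RTM_NEWLINK;
        n->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

        ifi = NLMSG_DATA(n);
        ifi->ifi_family = AF_UNSPEC;
        ifi->ifi_index = if_nametoindex("eth1");    /* hypothetical slave */

        linkinfo = nest_start(n, IFLA_LINKINFO);
        put_attr(n, IFLA_INFO_SLAVE_KIND, "bond", sizeof("bond"));
        slave_data = nest_start(n, IFLA_INFO_SLAVE_DATA);
        put_attr(n, IFLA_BOND_SLAVE_PRIO, &prio, sizeof(prio));    /* NLA_S32 */
        nest_end(n, slave_data);
        nest_end(n, linkinfo);

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        if (fd < 0 || sendto(fd, n, n->nlmsg_len, 0,
                             (struct sockaddr *)&dst, sizeof(dst)) < 0)
            return 1;
        close(fd);
        return 0;
    }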
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 96eef19cffc4..3498db1c1b3c 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -40,6 +40,8 @@ static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_primary_reselect_set(struct bonding *bond,
@@ -365,6 +367,16 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_miimon_set
},
+ [BOND_OPT_PRIO] = {
+ .id = BOND_OPT_PRIO,
+ .name = "prio",
+ .desc = "Link priority for failover re-selection",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_prio_set
+ },
[BOND_OPT_PRIMARY] = {
.id = BOND_OPT_PRIMARY,
.name = "primary",
@@ -1306,6 +1318,27 @@ static int bond_option_missed_max_set(struct bonding *bond,
return 0;
}
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(newval->slave_dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+ slave->prio = newval->value;
+
+ if (rtnl_dereference(bond->primary_slave))
+ slave_warn(bond->dev, slave->dev,
+ "prio updated, but will not affect failover re-selection as primary slave have been set\n");
+ else
+ bond_select_active_slave(bond);
+
+ return 0;
+}
+
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 5458f57177a0..0b0f234b0b50 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -722,13 +722,21 @@ static int cfv_probe(struct virtio_device *vdev)
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
/* register Netdev */
- err = register_netdev(netdev);
+ err = register_netdevice(netdev);
if (err) {
+ rtnl_unlock();
dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
goto err;
}
+ virtio_device_ready(vdev);
+
+ rtnl_unlock();
+
debugfs_init(cfv);
return 0;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index b2dcc1e5a388..3048ad77edb3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,5 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "CAN Device Drivers"
+
+menuconfig CAN_DEV
+ tristate "CAN Device Drivers"
+ default y
+ depends on CAN
+ help
+ Controller Area Network (CAN) is a serial communications protocol of up
+ to 1Mbit/s for its original release (now known as Classical CAN) and up
+ to 8Mbit/s for the more recent CAN with Flexible Data-Rate (CAN-FD).
+ The CAN bus was originally designed mainly for automotive use, but is
+ now widely used in marine (NMEA2000), industrial, and medical
+ applications. More information on the CAN network protocol family
+ PF_CAN is contained in <Documentation/networking/can.rst>.
+
+ This section contains all the CAN(-FD) device drivers including the
+ virtual ones. If you own such devices or plan to use the virtual CAN
+ interfaces to develop applications, say Y here.
+
+ To compile as a module, choose M here: the module will be called
+ can-dev.
+
+if CAN_DEV
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
@@ -28,35 +49,22 @@ config CAN_VXCAN
This driver can also be built as a module. If so, the module
will be called vxcan.
-config CAN_SLCAN
- tristate "Serial / USB serial CAN Adaptors (slcan)"
- depends on TTY
+config CAN_NETLINK
+ bool "CAN device drivers with Netlink support"
+ default y
help
- CAN driver for several 'low cost' CAN interfaces that are attached
- via serial lines or via USB-to-serial adapters using the LAWICEL
- ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+ Enables the common framework for CAN device drivers. This is the
+ standard library and provides features for the Netlink interface such
+ as bittiming validation, support of CAN error states, device restart
+ and others.
- As only the sending and receiving of CAN frames is implemented, this
- driver should work with the (serial/USB) CAN hardware from:
- www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
-
- Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the linux-can project, see
- https://github.com/linux-can/can-utils for details.
-
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
+ The additional features selected by this option will be added to the
+ can-dev module.
-config CAN_DEV
- tristate "Platform CAN drivers with Netlink support"
- default y
- help
- Enables the common framework for platform CAN drivers with Netlink
- support. This is the standard library for CAN drivers.
- If unsure, say Y.
+ This is required by all platform and hardware CAN drivers. If you
+ plan to use such devices or if unsure, say Y.
-if CAN_DEV
+if CAN_NETLINK
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
@@ -69,8 +77,15 @@ config CAN_CALC_BITTIMING
source clock frequencies. Disabling saves some space, but then the
bit-timing parameters must be specified directly using the Netlink
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
+
+ The additional features selected by this option will be added to the
+ can-dev module.
+
If unsure, say Y.
+config CAN_RX_OFFLOAD
+ bool
+
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM
@@ -78,10 +93,29 @@ config CAN_AT91
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
+config CAN_CAN327
+ tristate "Serial / USB serial ELM327 based OBD-II Interfaces (can327)"
+ depends on TTY
+ select CAN_RX_OFFLOAD
+ help
+ CAN driver for several 'low cost' OBD-II interfaces based on the
+ ELM327 OBD-II interpreter chip.
+
+ This is a best effort driver - the ELM327 interface was never
+ designed to be used as a standalone CAN interface. However, it can
+ still be used for simple request-response protocols (such as OBD II),
+ and to monitor broadcast messages on a bus (such as in a vehicle).
+
+ Please refer to the documentation for information on how to use it:
+ Documentation/networking/device_drivers/can/can327.rst
+
+ If this driver is built as a module, it will be called can327.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
depends on HAS_IOMEM
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Freescale FlexCAN.
@@ -118,6 +152,26 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express HS v2
Kvaser Mini PCI Express 2xHS v2
+config CAN_SLCAN
+ tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on TTY
+ help
+ CAN driver for several 'low cost' CAN interfaces that are attached
+ via serial lines or via USB-to-serial adapters using the LAWICEL
+ ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+
+ As only the sending and receiving of CAN frames is implemented, this
+ driver should work with the (serial/USB) CAN hardware from:
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+
+ Userspace tools to attach the SLCAN line discipline (slcan_attach,
+ slcand) can be found in the can-utils at the linux-can project, see
+ https://github.com/linux-can/can-utils for details.
+
+ The slcan driver supports up to 10 CAN netdevices by default which
+ can be changed by the 'maxdev=xx' module option. This driver can
+ also be built as a module. If so, the module will be called slcan.
+
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -131,6 +185,7 @@ config CAN_SUN4I
config CAN_TI_HECC
depends on ARM
tristate "TI High End CAN Controller"
+ select CAN_RX_OFFLOAD
help
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
@@ -164,7 +219,7 @@ source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
source "drivers/net/can/usb/Kconfig"
-endif
+endif #CAN_NETLINK
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
@@ -174,4 +229,4 @@ config CAN_DEBUG_DEVICES
a problem with CAN support and want to see more of what is going
on.
-endmenu
+endif #CAN_DEV
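
Like slcan, the can327 driver added below is a TTY line discipline, so a
userspace process must open the serial port and attach the discipline
before the CAN netdev appears (the slcan help text above points at
slcan_attach/slcand for the same job). A minimal sketch of such an attach
helper follows, assuming a kernel that exports N_CAN327 via <linux/tty.h>
and a hypothetical adapter on /dev/ttyUSB0; termios/baud-rate setup is
omitted.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/tty.h>    /* N_CAN327, assumed available */

    int main(void)
    {
        int ldisc = N_CAN327;
        int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);    /* hypothetical */

        if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
            perror("can327 attach");
            return 1;
        }

        /* The line discipline (and thus the canX netdev) lives for as
         * long as this process keeps the tty open, the same way
         * slcand/slcan_attach keep slcan devices alive.
         */
        pause();
        return 0;
    }

Once attached, the interface is configured like any other CAN device,
e.g. "ip link set can0 type can bitrate 500000" followed by
"ip link set can0 up".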
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0af85983634c..61c75ce9d500 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
-obj-$(CONFIG_CAN_SLCAN) += slcan.o
+obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
obj-y += rcar/
@@ -14,6 +14,7 @@ obj-y += usb/
obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
+obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
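
The CAN_CAN327 help text above mentions "simple request-response
protocols (such as OBD II)". As a hedged illustration of what such
traffic looks like from the SocketCAN side (a sketch, not part of the
patch), here is the classic engine-RPM query; the interface name "can0"
is an assumption, and 0x7df is the OBD-II functional broadcast ID, which
is also the driver's hardware default ("AT SH 7DF" in the init script
below).

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/can.h>
    #include <linux/can/raw.h>

    int main(void)
    {
        struct sockaddr_can addr = { .can_family = AF_CAN };
        struct can_frame tx = {
            .can_id = 0x7df,    /* OBD-II functional (broadcast) request */
            .len = 8,
            /* 2 bytes follow: service 0x01 (current data), PID 0x0c (RPM) */
            .data = { 0x02, 0x01, 0x0c, 0, 0, 0, 0, 0 },
        };
        struct can_frame rx;
        struct ifreq ifr;
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

        strcpy(ifr.ifr_name, "can0");    /* hypothetical can327 netdev */
        if (s < 0 || ioctl(s, SIOCGIFINDEX, &ifr) < 0)
            return 1;
        addr.can_ifindex = ifr.ifr_ifindex;
        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
            return 1;

        if (write(s, &tx, sizeof(tx)) != sizeof(tx) ||
            read(s, &rx, sizeof(rx)) != sizeof(rx))
            return 1;

        /* ECUs answer on 0x7e8..0x7ef; RPM = ((A << 8) + B) / 4 */
        printf("reply %03x, rpm=%u\n", rx.can_id & CAN_SFF_MASK,
               ((rx.data[3] << 8) + rx.data[4]) / 4);
        close(s);
        return 0;
    }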
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
new file mode 100644
index 000000000000..5da7778d92dc
--- /dev/null
+++ b/drivers/net/can/can327.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ELM327 based CAN interface driver (tty line discipline)
+ *
+ * This driver started as a derivative of linux/drivers/net/can/slcan.c
+ * and my thanks go to the original authors for their inspiration.
+ *
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ */
+
+#define pr_fmt(fmt) "can327: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/workqueue.h>
+
+#include <uapi/linux/tty.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
+
+#define CAN327_NAPI_WEIGHT 4
+
+#define CAN327_SIZE_TXBUF 32
+#define CAN327_SIZE_RXBUF 1024
+
+#define CAN327_CAN_CONFIG_SEND_SFF 0x8000
+#define CAN327_CAN_CONFIG_VARIABLE_DLC 0x4000
+#define CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF 0x2000
+#define CAN327_CAN_CONFIG_BAUDRATE_MULT_8_7 0x1000
+
+#define CAN327_DUMMY_CHAR 'y'
+#define CAN327_DUMMY_STRING "y"
+#define CAN327_READY_CHAR '>'
+
+/* Bits in elm->cmds_todo */
+enum can327_tx_do {
+ CAN327_TX_DO_CAN_DATA = 0,
+ CAN327_TX_DO_CANID_11BIT,
+ CAN327_TX_DO_CANID_29BIT_LOW,
+ CAN327_TX_DO_CANID_29BIT_HIGH,
+ CAN327_TX_DO_CAN_CONFIG_PART2,
+ CAN327_TX_DO_CAN_CONFIG,
+ CAN327_TX_DO_RESPONSES,
+ CAN327_TX_DO_SILENT_MONITOR,
+ CAN327_TX_DO_INIT,
+};
+
+struct can327 {
+ /* This must be the first member when using alloc_candev() */
+ struct can_priv can;
+
+ struct can_rx_offload offload;
+
+ /* TTY buffers */
+ u8 txbuf[CAN327_SIZE_TXBUF];
+ u8 rxbuf[CAN327_SIZE_RXBUF];
+
+ /* Per-channel lock */
+ spinlock_t lock;
+
+ /* TTY and netdev devices that we're bridging */
+ struct tty_struct *tty;
+ struct net_device *dev;
+
+ /* TTY buffer accounting */
+ struct work_struct tx_work; /* Flushes TTY TX buffer */
+ u8 *txhead; /* Next TX byte */
+ size_t txleft; /* Bytes left to TX */
+ int rxfill; /* Bytes already RX'd in buffer */
+
+ /* State machine */
+ enum {
+ CAN327_STATE_NOTINIT = 0,
+ CAN327_STATE_GETDUMMYCHAR,
+ CAN327_STATE_GETPROMPT,
+ CAN327_STATE_RECEIVING,
+ } state;
+
+ /* Things we have yet to send */
+ char **next_init_cmd;
+ unsigned long cmds_todo;
+
+ /* The CAN frame and config the ELM327 is sending/using,
+ * or will send/use after finishing all cmds_todo
+ */
+ struct can_frame can_frame_to_send;
+ u16 can_config;
+ u8 can_bitrate_divisor;
+
+ /* Parser state */
+ bool drop_next_line;
+
+ /* Stop the channel on UART side hardware failure, e.g. stray
+ * characters or neverending lines. This may be caused by bad
+ * UART wiring, a bad ELM327, a bad UART bridge...
+ * Once this is true, nothing will be sent to the TTY.
+ */
+ bool uart_side_failure;
+};
+
+static inline void can327_uart_side_failure(struct can327 *elm);
+
+static void can327_send(struct can327 *elm, const void *buf, size_t len)
+{
+ int written;
+
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->uart_side_failure)
+ return;
+
+ memcpy(elm->txbuf, buf, len);
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a little amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it's running with interrupts enabled.
+ * In this case we *never* got WRITE_WAKEUP event,
+ * if we did not request it before write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ written = elm->tty->ops->write(elm->tty, elm->txbuf, len);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+ return;
+ }
+
+ elm->txleft = len - written;
+ elm->txhead = elm->txbuf + written;
+}
+
+/* Take the ELM327 out of almost any state and back into command mode.
+ * We send CAN327_DUMMY_CHAR which will either abort any running
+ * operation, or be echoed back to us in case we're already in command
+ * mode.
+ */
+static void can327_kick_into_cmd_mode(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->state != CAN327_STATE_GETDUMMYCHAR &&
+ elm->state != CAN327_STATE_GETPROMPT) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+
+ elm->state = CAN327_STATE_GETDUMMYCHAR;
+ }
+}
+
+/* Schedule a CAN frame and necessary config changes to be sent to the TTY. */
+static void can327_send_frame(struct can327 *elm, struct can_frame *frame)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Schedule any necessary changes in ELM327's CAN configuration */
+ if (elm->can_frame_to_send.can_id != frame->can_id) {
+ /* Set the new CAN ID for transmission. */
+ if ((frame->can_id ^ elm->can_frame_to_send.can_id)
+ & CAN_EFF_FLAG) {
+ elm->can_config =
+ (frame->can_id & CAN_EFF_FLAG ? 0 : CAN327_CAN_CONFIG_SEND_SFF) |
+ CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF |
+ elm->can_bitrate_divisor;
+
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+ }
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo);
+ } else {
+ set_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_LOW,
+ &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH,
+ &elm->cmds_todo);
+ }
+ }
+
+ /* Schedule the CAN frame itself. */
+ elm->can_frame_to_send = *frame;
+ set_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+/* ELM327 initialisation sequence.
+ * The line length is limited by the buffer in can327_handle_prompt().
+ */
+static char *can327_init_script[] = {
+ "AT WS\r", /* v1.0: Warm Start */
+ "AT PP FF OFF\r", /* v1.0: All Programmable Parameters Off */
+ "AT M0\r", /* v1.0: Memory Off */
+ "AT AL\r", /* v1.0: Allow Long messages */
+ "AT BI\r", /* v1.0: Bypass Initialisation */
+ "AT CAF0\r", /* v1.0: CAN Auto Formatting Off */
+ "AT CFC0\r", /* v1.0: CAN Flow Control Off */
+ "AT CF 000\r", /* v1.0: Reset CAN ID Filter */
+ "AT CM 000\r", /* v1.0: Reset CAN ID Mask */
+ "AT E1\r", /* v1.0: Echo On */
+ "AT H1\r", /* v1.0: Headers On */
+ "AT L0\r", /* v1.0: Linefeeds Off */
+ "AT SH 7DF\r", /* v1.0: Set CAN sending ID to 0x7df */
+ "AT ST FF\r", /* v1.0: Set maximum Timeout for response after TX */
+ "AT AT0\r", /* v1.2: Adaptive Timing Off */
+ "AT D1\r", /* v1.3: Print DLC On */
+ "AT S1\r", /* v1.3: Spaces On */
+ "AT TP B\r", /* v1.0: Try Protocol B */
+ NULL
+};
+
+static void can327_init_device(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ elm->state = CAN327_STATE_NOTINIT;
+ elm->can_frame_to_send.can_id = 0x7df; /* ELM327 HW default */
+ elm->rxfill = 0;
+ elm->drop_next_line = 0;
+
+ /* We can only set the bitrate as a fraction of 500000.
+ * The bitrates listed in can327_bitrate_const will
+ * limit the user to the right values.
+ */
+ elm->can_bitrate_divisor = 500000 / elm->can.bittiming.bitrate;
+ elm->can_config =
+ CAN327_CAN_CONFIG_SEND_SFF | CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor;
+
+ /* Configure ELM327 and then start monitoring */
+ elm->next_init_cmd = &can327_init_script[0];
+ set_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (!netif_running(elm->dev))
+ return;
+
+ /* Queue for NAPI pickup.
+ * rx-offload will update stats and LEDs for us.
+ */
+ if (can_rx_offload_queue_tail(&elm->offload, skb))
+ elm->dev->stats.rx_fifo_errors++;
+
+ /* Wake NAPI */
+ can_rx_offload_irq_finish(&elm->offload);
+}
+
+/* Called when we're out of ideas and just want it all to end. */
+static inline void can327_uart_side_failure(struct can327 *elm)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ elm->uart_side_failure = true;
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ elm->can.can_stats.bus_off++;
+ netif_stop_queue(elm->dev);
+ elm->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(elm->dev);
+
+ netdev_err(elm->dev,
+ "ELM327 misbehaved. Blocking further communication.\n");
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ return;
+
+ frame->can_id |= CAN_ERR_BUSOFF;
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Compares a byte buffer (non-NUL terminated) to the payload part of
+ * a string, and returns true iff the buffer (content *and* length) is
+ * exactly that string, without the terminating NUL byte.
+ *
+ * Example: If reference is "BUS ERROR", then this returns true iff nbytes == 9
+ * and !memcmp(buf, "BUS ERROR", 9).
+ *
+ * The reason to use strings is so we can easily include them in the C
+ * code, and to avoid hardcoding lengths.
+ */
+static inline bool can327_rxbuf_cmp(const u8 *buf, size_t nbytes,
+ const char *reference)
+{
+ size_t ref_len = strlen(reference);
+
+ return (nbytes == ref_len) && !memcmp(buf, reference, ref_len);
+}
+
+static void can327_parse_error(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ /* It's okay to return here:
+ * The outer parsing loop will drop this UART buffer.
+ */
+ return;
+
+ /* Filter possible error messages based on length of RX'd line */
+ if (can327_rxbuf_cmp(elm->rxbuf, len, "UNABLE TO CONNECT")) {
+ netdev_err(elm->dev,
+ "ELM327 reported UNABLE TO CONNECT. Please check your setup.\n");
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUFFER FULL")) {
+ /* This will only happen if the last data line was complete.
+ * Otherwise, can327_parse_frame() will heuristically
+ * emit this kind of error frame instead.
+ */
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS ERROR")) {
+ frame->can_id |= CAN_ERR_BUSERROR;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "CAN ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "<RX ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS BUSY")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_OVERLOAD;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "FB ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_TX;
+ } else if (len == 5 && !memcmp(elm->rxbuf, "ERR", 3)) {
+ /* ERR is followed by two digits, hence line length 5 */
+ netdev_err(elm->dev, "ELM327 reported an ERR%c%c. Please power it off and on again.\n",
+ elm->rxbuf[3], elm->rxbuf[4]);
+ frame->can_id |= CAN_ERR_CRTL;
+ } else {
+ /* Something else has happened.
+ * Maybe garbage on the UART line.
+ * Emit a generic error frame.
+ */
+ }
+
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Parse CAN frames coming as ASCII from ELM327.
+ * They can be of various formats:
+ *
+ * 29-bit ID (EFF): 12 34 56 78 D PL PL PL PL PL PL PL PL
+ * 11-bit ID (!EFF): 123 D PL PL PL PL PL PL PL PL
+ *
+ * where D = DLC, PL = payload byte
+ *
+ * Instead of a payload, RTR indicates a remote request.
+ *
+ * We will use the spaces and line length to guess the format.
+ */
+static int can327_parse_frame(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+ int hexlen;
+ int datastart;
+ int i;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_skb(elm->dev, &frame);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Find first non-hex and non-space character:
+ * - In the simplest case, there is none.
+ * - For RTR frames, 'R' is the first non-hex character.
+ * - An error message may replace the end of the data line.
+ */
+ for (hexlen = 0; hexlen <= len; hexlen++) {
+ if (hex_to_bin(elm->rxbuf[hexlen]) < 0 &&
+ elm->rxbuf[hexlen] != ' ') {
+ break;
+ }
+ }
+
+ /* Sanity check whether the line is really a clean hexdump,
+ * or terminated by an error message, or contains garbage.
+ */
+ if (hexlen < len && !isdigit(elm->rxbuf[hexlen]) &&
+ !isupper(elm->rxbuf[hexlen]) && '<' != elm->rxbuf[hexlen] &&
+ ' ' != elm->rxbuf[hexlen]) {
+ /* The line is likely garbled anyway, so bail.
+ * The main code will restart listening.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* Use spaces in CAN ID to distinguish 29 or 11 bit address length.
+ * No out-of-bounds access:
+ * We use the fact that we can always read from elm->rxbuf.
+ */
+ if (elm->rxbuf[2] == ' ' && elm->rxbuf[5] == ' ' &&
+ elm->rxbuf[8] == ' ' && elm->rxbuf[11] == ' ' &&
+ elm->rxbuf[13] == ' ') {
+ frame->can_id = CAN_EFF_FLAG;
+ datastart = 14;
+ } else if (elm->rxbuf[3] == ' ' && elm->rxbuf[5] == ' ') {
+ datastart = 6;
+ } else {
+ /* This is not a well-formatted data line.
+ * Assume it's an error message.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ if (hexlen < datastart) {
+ /* The line is too short to be a valid frame hex dump.
+ * Something interrupted the hex dump or it is invalid.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* From here on all chars up to buf[hexlen] are hex or spaces,
+ * at well-defined offsets.
+ */
+
+ /* Read CAN data length */
+ frame->len = (hex_to_bin(elm->rxbuf[datastart - 2]) << 0);
+
+ /* Read CAN ID */
+ if (frame->can_id & CAN_EFF_FLAG) {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 28) |
+ (hex_to_bin(elm->rxbuf[1]) << 24) |
+ (hex_to_bin(elm->rxbuf[3]) << 20) |
+ (hex_to_bin(elm->rxbuf[4]) << 16) |
+ (hex_to_bin(elm->rxbuf[6]) << 12) |
+ (hex_to_bin(elm->rxbuf[7]) << 8) |
+ (hex_to_bin(elm->rxbuf[9]) << 4) |
+ (hex_to_bin(elm->rxbuf[10]) << 0);
+ } else {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 8) |
+ (hex_to_bin(elm->rxbuf[1]) << 4) |
+ (hex_to_bin(elm->rxbuf[2]) << 0);
+ }
+
+ /* Check for RTR frame */
+ if (elm->rxfill >= hexlen + 3 &&
+ !memcmp(&elm->rxbuf[hexlen], "RTR", 3)) {
+ frame->can_id |= CAN_RTR_FLAG;
+ }
+
+ /* Is the line long enough to hold the advertised payload?
+ * Note: RTR frames have a DLC, but no actual payload.
+ */
+ if (!(frame->can_id & CAN_RTR_FLAG) &&
+ (hexlen < frame->len * 3 + datastart)) {
+ /* Incomplete frame.
+ * Probably the ELM327's RS232 TX buffer was full.
+ * Emit an error frame and exit.
+ */
+ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ frame->len = CAN_ERR_DLC;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ can327_feed_frame_to_netdev(elm, skb);
+
+ /* Signal failure to parse.
+ * The line will be re-parsed as an error line, which will fail.
+ * However, this will correctly drop the state machine back into
+ * command mode.
+ */
+ return -ENODATA;
+ }
+
+ /* Parse the data nibbles. */
+ for (i = 0; i < frame->len; i++) {
+ frame->data[i] =
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i]) << 4) |
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i + 1]));
+ }
+
+ /* Feed the frame to the network layer. */
+ can327_feed_frame_to_netdev(elm, skb);
+
+ return 0;
+}
+
+static void can327_parse_line(struct can327 *elm, size_t len)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Skip empty lines */
+ if (!len)
+ return;
+
+ /* Skip echo lines */
+ if (elm->drop_next_line) {
+ elm->drop_next_line = 0;
+ return;
+ } else if (!memcmp(elm->rxbuf, "AT", 2)) {
+ return;
+ }
+
+ /* Regular parsing */
+ if (elm->state == CAN327_STATE_RECEIVING &&
+ can327_parse_frame(elm, len)) {
+ /* Parse an error line. */
+ can327_parse_error(elm, len);
+
+ /* Start afresh. */
+ can327_kick_into_cmd_mode(elm);
+ }
+}
+
+static void can327_handle_prompt(struct can327 *elm)
+{
+ struct can_frame *frame = &elm->can_frame_to_send;
+ /* Size this buffer for the largest ELM327 line we may generate,
+ * which is currently an 8 byte CAN frame's payload hexdump.
+ * Items in can327_init_script must fit here, too!
+ */
+ char local_txbuf[sizeof("0102030405060708\r")];
+
+ lockdep_assert_held(&elm->lock);
+
+ if (!elm->cmds_todo) {
+ /* Enter CAN monitor mode */
+ can327_send(elm, "ATMA\r", 5);
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+
+ return;
+ }
+
+ /* Reconfigure ELM327 step by step as indicated by elm->cmds_todo */
+ if (test_bit(CAN327_TX_DO_INIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf), "%s",
+ *elm->next_init_cmd);
+
+ elm->next_init_cmd++;
+ if (!(*elm->next_init_cmd)) {
+ clear_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ /* Init finished. */
+ }
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCSM%i\r",
+ !!(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATR%i\r",
+ !(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPC\r");
+ set_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPB%04X\r",
+ elm->can_config);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCP%02X\r",
+ (frame->can_id & CAN_EFF_MASK) >> 24);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%06X\r",
+ frame->can_id & CAN_EFF_MASK & ((1 << 24) - 1));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%03X\r",
+ frame->can_id & CAN_SFF_MASK);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo)) {
+ if (frame->can_id & CAN_RTR_FLAG) {
+ /* Send an RTR frame. Their DLC is fixed.
+ * Some chips don't send them at all.
+ */
+ snprintf(local_txbuf, sizeof(local_txbuf), "ATRTR\r");
+ } else {
+ /* Send a regular CAN data frame */
+ int i;
+
+ for (i = 0; i < frame->len; i++) {
+ snprintf(&local_txbuf[2 * i],
+ sizeof(local_txbuf), "%02X",
+ frame->data[i]);
+ }
+
+ snprintf(&local_txbuf[2 * i], sizeof(local_txbuf),
+ "\r");
+ }
+
+ elm->drop_next_line = 1;
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+ }
+
+ can327_send(elm, local_txbuf, strlen(local_txbuf));
+}
+
+static bool can327_is_ready_char(char c)
+{
+ /* Bits 0xc0 are sometimes set (randomly), hence the mask.
+ * Probably bad hardware.
+ */
+ return (c & 0x3f) == CAN327_READY_CHAR;
+}
+
+static void can327_drop_bytes(struct can327 *elm, size_t i)
+{
+ lockdep_assert_held(&elm->lock);
+
+ memmove(&elm->rxbuf[0], &elm->rxbuf[i], CAN327_SIZE_RXBUF - i);
+ elm->rxfill -= i;
+}
+
+static void can327_parse_rxbuf(struct can327 *elm, size_t first_new_char_idx)
+{
+ size_t len, pos;
+
+ lockdep_assert_held(&elm->lock);
+
+ switch (elm->state) {
+ case CAN327_STATE_NOTINIT:
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_GETDUMMYCHAR:
+ /* Wait for 'y' or '>' */
+ for (pos = 0; pos < elm->rxfill; pos++) {
+ if (elm->rxbuf[pos] == CAN327_DUMMY_CHAR) {
+ can327_send(elm, "\r", 1);
+ elm->state = CAN327_STATE_GETPROMPT;
+ pos++;
+ break;
+ } else if (can327_is_ready_char(elm->rxbuf[pos])) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ pos++;
+ break;
+ }
+ }
+
+ can327_drop_bytes(elm, pos);
+ break;
+
+ case CAN327_STATE_GETPROMPT:
+ /* Wait for '>' */
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1]))
+ can327_handle_prompt(elm);
+
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_RECEIVING:
+ /* Find <CR> delimiting feedback lines. */
+ len = first_new_char_idx;
+ while (len < elm->rxfill && elm->rxbuf[len] != '\r')
+ len++;
+
+ if (len == CAN327_SIZE_RXBUF) {
+ /* Assume the buffer ran full with garbage.
+ * Did we even connect at the right baud rate?
+ */
+ netdev_err(elm->dev,
+ "RX buffer overflow. Faulty ELM327 or UART?\n");
+ can327_uart_side_failure(elm);
+ } else if (len == elm->rxfill) {
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) {
+ /* The ELM327's AT ST response timeout ran out,
+ * so we got a prompt.
+ * Clear RX buffer and restart listening.
+ */
+ elm->rxfill = 0;
+
+ can327_handle_prompt(elm);
+ }
+
+ /* No <CR> found - we haven't received a full line yet.
+ * Wait for more data.
+ */
+ } else {
+ /* We have a full line to parse. */
+ can327_parse_line(elm, len);
+
+ /* Remove parsed data from RX buffer. */
+ can327_drop_bytes(elm, len + 1);
+
+ /* More data to parse? */
+ if (elm->rxfill)
+ can327_parse_rxbuf(elm, 0);
+ }
+ }
+}
+
+static int can327_netdev_open(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&elm->lock);
+
+ if (!elm->tty) {
+ spin_unlock_bh(&elm->lock);
+ return -ENODEV;
+ }
+
+ if (elm->uart_side_failure)
+ netdev_warn(elm->dev,
+ "Reopening netdev after a UART side fault has been detected.\n");
+
+ /* Clear TTY buffers */
+ elm->rxfill = 0;
+ elm->txleft = 0;
+
+ /* open_candev() checks for elm->can.bittiming.bitrate != 0 */
+ err = open_candev(dev);
+ if (err) {
+ spin_unlock_bh(&elm->lock);
+ return err;
+ }
+
+ can327_init_device(elm);
+ spin_unlock_bh(&elm->lock);
+
+ err = can_rx_offload_add_manual(dev, &elm->offload, CAN327_NAPI_WEIGHT);
+ if (err) {
+ close_candev(dev);
+ return err;
+ }
+
+ can_rx_offload_enable(&elm->offload);
+
+ elm->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int can327_netdev_close(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+
+ /* Interrupt whatever the ELM327 is doing right now */
+ spin_lock_bh(&elm->lock);
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ spin_unlock_bh(&elm->lock);
+
+ netif_stop_queue(dev);
+
+ /* Give UART one final chance to flush. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ flush_work(&elm->tx_work);
+
+ can_rx_offload_disable(&elm->offload);
+ elm->can.state = CAN_STATE_STOPPED;
+ can_rx_offload_del(&elm->offload);
+ close_candev(dev);
+
+ return 0;
+}
+
+/* Send a can_frame to a TTY. */
+static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ /* We shouldn't get here after a hardware fault:
+ * can_bus_off() calls netif_carrier_off()
+ */
+ if (elm->uart_side_failure) {
+ WARN_ON_ONCE(elm->uart_side_failure);
+ goto out;
+ }
+
+ netif_stop_queue(dev);
+
+ /* BHs are already disabled, so no spin_lock_bh().
+	 * See Documentation/networking/netdevices.rst
+ */
+ spin_lock(&elm->lock);
+ can327_send_frame(elm, frame);
+ spin_unlock(&elm->lock);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 0 : frame->len;
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops can327_netdev_ops = {
+ .ndo_open = can327_netdev_open,
+ .ndo_stop = can327_netdev_close,
+ .ndo_start_xmit = can327_netdev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static bool can327_is_valid_rx_char(u8 c)
+{
+ static const bool lut_char_is_valid['z'] = {
+ ['\r'] = true,
+ [' '] = true,
+ ['.'] = true,
+ ['0'] = true, true, true, true, true,
+ ['5'] = true, true, true, true, true,
+ ['<'] = true,
+ [CAN327_READY_CHAR] = true,
+ ['?'] = true,
+ ['A'] = true, true, true, true, true, true, true,
+ ['H'] = true, true, true, true, true, true, true,
+ ['O'] = true, true, true, true, true, true, true,
+ ['V'] = true, true, true, true, true,
+ ['a'] = true,
+ ['b'] = true,
+ ['v'] = true,
+ [CAN327_DUMMY_CHAR] = true,
+ };
+ BUILD_BUG_ON(CAN327_DUMMY_CHAR >= 'z');
+
+ return (c < ARRAY_SIZE(lut_char_is_valid) && lut_char_is_valid[c]);
+}
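
The table above leans on C99 designated initializers: bare values after a designator fill the following indices, so `['0'] = true, true, true, true, true` covers '0' through '4'. A minimal sketch of just that semantics (illustrative, not from the patch):

#include <assert.h>
#include <stdbool.h>

static const bool lut['9' + 1] = {
	['0'] = true, true, true, true, true,	/* marks '0'..'4' */
	['5'] = true, true, true, true, true,	/* marks '5'..'9' */
};

int main(void)
{
	assert(lut['0'] && lut['4'] && lut['9']);
	assert(!lut['/']);	/* anything before '0' stays false */
	return 0;
}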
+
+/* Handle incoming ELM327 ASCII data.
+ * This will not be re-entered while running, but other ldisc
+ * functions may be called in parallel.
+ */
+static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ size_t first_new_char_idx;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ /* Store old rxfill, so can327_parse_rxbuf() will have
+ * the option of skipping already checked characters.
+ */
+ first_new_char_idx = elm->rxfill;
+
+ while (count-- && elm->rxfill < CAN327_SIZE_RXBUF) {
+ if (fp && *fp++) {
+ netdev_err(elm->dev,
+ "Error in received character stream. Check your wiring.");
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ /* Ignore NUL characters, which the PIC microcontroller may
+ * inadvertently insert due to a known hardware bug.
+ * See ELM327 documentation, which refers to a Microchip PIC
+ * bug description.
+ */
+ if (*cp) {
+ /* Check for stray characters on the UART line.
+ * Likely caused by bad hardware.
+ */
+ if (!can327_is_valid_rx_char(*cp)) {
+ netdev_err(elm->dev,
+ "Received illegal character %02x.\n",
+ *cp);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->rxbuf[elm->rxfill++] = *cp;
+ }
+
+ cp++;
+ }
+
+ if (count >= 0) {
+ netdev_err(elm->dev,
+ "Receive buffer overflowed. Bad chip or wiring? count = %i",
+ count);
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ can327_parse_rxbuf(elm, first_new_char_idx);
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Write out remaining transmit buffer.
+ * Scheduled when TTY is writable.
+ */
+static void can327_ldisc_tx_worker(struct work_struct *work)
+{
+ struct can327 *elm = container_of(work, struct can327, tx_work);
+ ssize_t written;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ if (elm->txleft) {
+ written = elm->tty->ops->write(elm->tty, elm->txhead,
+ elm->txleft);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->txleft -= written;
+ elm->txhead += written;
+ }
+
+ if (!elm->txleft)
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Called by the driver when there's room for more data. */
+static void can327_ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ schedule_work(&elm->tx_work);
+}
+
+/* ELM327 can only handle bitrates that are integer divisors of 500 kHz,
+ * or 7/8 of that. Divisors are 1 to 64.
+ * Currently we don't implement support for 7/8 rates.
+ */
+static const u32 can327_bitrate_const[] = {
+ 7812, 7936, 8064, 8196, 8333, 8474, 8620, 8771,
+ 8928, 9090, 9259, 9433, 9615, 9803, 10000, 10204,
+ 10416, 10638, 10869, 11111, 11363, 11627, 11904, 12195,
+ 12500, 12820, 13157, 13513, 13888, 14285, 14705, 15151,
+ 15625, 16129, 16666, 17241, 17857, 18518, 19230, 20000,
+ 20833, 21739, 22727, 23809, 25000, 26315, 27777, 29411,
+ 31250, 33333, 35714, 38461, 41666, 45454, 50000, 55555,
+ 62500, 71428, 83333, 100000, 125000, 166666, 250000, 500000
+};
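
The table is simply 500 kbit/s divided by every divisor from 64 down to 1, truncated to an integer; a short sketch that regenerates it (the unimplemented 7/8 variants would be each value times 7, divided by 8):

#include <stdio.h>

int main(void)
{
	/* prints 7812, 7936, ..., 250000, 500000 as in the table above */
	for (int div = 64; div >= 1; div--)
		printf("%d\n", 500000 / div);
	return 0;
}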
+
+static int can327_ldisc_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct can327 *elm;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(struct can327), 0);
+ if (!dev)
+ return -ENFILE;
+ elm = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ spin_lock_init(&elm->lock);
+ INIT_WORK(&elm->tx_work, can327_ldisc_tx_worker);
+
+ /* Configure CAN metadata */
+ elm->can.bitrate_const = can327_bitrate_const;
+ elm->can.bitrate_const_cnt = ARRAY_SIZE(can327_bitrate_const);
+ elm->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ elm->dev = dev;
+ dev->netdev_ops = &can327_netdev_ops;
+
+ /* Mark ldisc channel as alive */
+ elm->tty = tty;
+ tty->disc_data = elm;
+
+ /* Let 'er rip */
+ err = register_candev(elm->dev);
+ if (err) {
+ free_candev(elm->dev);
+ return err;
+ }
+
+ netdev_info(elm->dev, "can327 on %s.\n", tty->name);
+
+ return 0;
+}
+
+/* Close down a can327 channel.
+ * This means flushing out any pending queues, and then returning.
+ * This call is serialized against other ldisc functions:
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this function for a hangup event.
+ */
+static void can327_ldisc_close(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ /* unregister_netdev() calls .ndo_stop() so we don't have to.
+ * Our .ndo_stop() also flushes the TTY write wakeup handler,
+ * so we can safely set elm->tty = NULL after this.
+ */
+ unregister_candev(elm->dev);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&elm->lock);
+ tty->disc_data = NULL;
+ elm->tty = NULL;
+ spin_unlock_bh(&elm->lock);
+
+ netdev_info(elm->dev, "can327 off %s.\n", tty->name);
+
+ free_candev(elm->dev);
+}
+
+static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strnlen(elm->dev->name, IFNAMSIZ - 1) + 1;
+ if (copy_to_user((void __user *)arg, elm->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops can327_ldisc = {
+ .owner = THIS_MODULE,
+ .name = "can327",
+ .num = N_CAN327,
+ .receive_buf = can327_ldisc_rx,
+ .write_wakeup = can327_ldisc_tx_wakeup,
+ .open = can327_ldisc_open,
+ .close = can327_ldisc_close,
+ .ioctl = can327_ldisc_ioctl,
+};
+
+static int __init can327_init(void)
+{
+ int status;
+
+ status = tty_register_ldisc(&can327_ldisc);
+ if (status)
+ pr_err("Can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit can327_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&can327_ldisc);
+}
+
+module_init(can327_init);
+module_exit(can327_exit);
+
+MODULE_ALIAS_LDISC(N_CAN327);
+MODULE_DESCRIPTION("ELM327 based CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Max Staudt <max@enpas.org>");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 64990bf20fdc..14ac7c0ee04c 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -1087,7 +1087,7 @@ clear:
/**
* ctucan_interrupt() - CAN Isr
* @irq: irq number
- * @dev_id: device id poniter
+ * @dev_id: device id pointer
*
* This is the CTU CAN FD ISR. It checks for the type of interrupt
* and invokes the corresponding ISR.
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index af2901db473c..633687d6b6c0 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += bittiming.o
-can-dev-y += dev.o
-can-dev-y += length.o
-can-dev-y += netlink.o
-can-dev-y += rx-offload.o
-can-dev-y += skb.o
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += dev.o
+can-dev-$(CONFIG_CAN_NETLINK) += length.o
+can-dev-$(CONFIG_CAN_NETLINK) += netlink.o
+can-dev-$(CONFIG_CAN_RX_OFFLOAD) += rx-offload.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index c1e76f0a5064..7ae80763c960 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -4,205 +4,8 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/units.h>
#include <linux/can/dev.h>
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- const unsigned int sample_point_nominal, const unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
-{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
-
- return best_sample_point;
-}
-
-int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * KILO /* BPS */)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * KILO /* BPS */)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error >= best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
- }
-
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
- }
-
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
-
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
- return 0;
-}
-
-void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
- const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
-
-{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
- return;
-
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
-
- /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
- * delay compensation" (TDC) is only applicable if data BRP is
- * one or two.
- */
- if (dbt->brp == 1 || dbt->brp == 2) {
- /* Sample point in clock periods */
- u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
- dbt->phase_seg1) * dbt->brp;
-
- if (sample_point_in_tc < tdc_const->tdco_min)
- return;
- tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
- }
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
new file mode 100644
index 000000000000..d3caa040614d
--- /dev/null
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/units.h>
+#include <linux/can/dev.h>
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ const unsigned int sample_point_nominal, const unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500 * KILO /* BPS */)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error >= best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ netdev_err(dev,
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EDOM;
+ }
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max) {
+ bt->sjw = 1;
+ } else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+
+ return 0;
+}
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
+
+{
+ if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ return;
+
+ *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+
+ /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
+ * delay compensation" (TDC) is only applicable if data BRP is
+ * one or two.
+ */
+ if (dbt->brp == 1 || dbt->brp == 2) {
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ }
+}
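
Plugging example numbers into the formulas above gives a useful sanity check (CAN_SYNC_SEG is 1): tseg1 = 13 and tseg2 = 2 put the sample point at 875 per mille, the CiA recommendation applied for bitrates up to 500 kbit/s, and a 16 MHz clock with brp = 2 then yields exactly 500 kbit/s. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int tseg1 = 13, tseg2 = 2, brp = 2, clock = 16000000;
	unsigned int tseg = tseg1 + tseg2;

	/* 1000 * 14 / 16 = 875 per mille */
	printf("sample point: %u\n", 1000 * (tseg + 1 - tseg2) / (tseg + 1));
	/* 16000000 / (2 * 16) = 500000 bit/s */
	printf("bitrate: %u\n", clock / (brp * (1 + tseg1 + tseg2)));
	return 0;
}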
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 96c9d9db00cf..523eaacfe29e 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,7 +4,6 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
@@ -17,12 +16,6 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
{
@@ -513,7 +506,7 @@ static __init int can_dev_init(void)
err = can_netlink_register();
if (!err)
- pr_info(MOD_DESC "\n");
+ pr_info("CAN device driver interface\n");
return err;
}
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 7633d98e3912..8efa22d9f214 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -176,7 +176,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->bittiming_const && !priv->do_set_bittiming)
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
return -EOPNOTSUPP;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
@@ -278,7 +279,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
+ !priv->data_bitrate_const)
return -EOPNOTSUPP;
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
@@ -509,7 +511,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate &&
+ if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
+ priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
nla_put(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming)) ||
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 61660248c69e..8bb62dd864c8 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -5,6 +5,14 @@
*/
#include <linux/can/dev.h>
+#include <linux/can/netlink.h>
+#include <linux/module.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
/* Local echo of CAN messages
*
@@ -252,3 +260,67 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
+/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (skb->protocol == htons(ETH_P_CAN)) {
+ if (unlikely(skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->protocol == htons(ETH_P_CANFD)) {
+ if (unlikely(skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else {
+ goto inval_skb;
+ }
+
+ if (!can_skb_headroom_valid(dev, skb)) {
+ goto inval_skb;
+ } else if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ netdev_info_once(dev,
+ "interface in listen only mode, dropping skb\n");
+ goto inval_skb;
+ }
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dropped_invalid_skb);
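
The expected call pattern, mirroring what can327_netdev_start_xmit() does earlier in this patch set: drivers invoke can_dropped_invalid_skb() first thing in their xmit routine and bail out if the skb was taken. A minimal kernel-style sketch (hardware handoff elided, names hypothetical):

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;	/* skb already freed and counted */

	/* ... hand (struct can_frame *)skb->data to the controller ... */

	kfree_skb(skb);
	return NETDEV_TX_OK;
}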
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 76df4807d366..4c47c1055eff 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1646,7 +1646,6 @@ static int grcan_probe(struct platform_device *ofdev)
*/
sysid_parent = of_find_node_by_path("/ambapp0");
if (sysid_parent) {
- of_node_get(sysid_parent);
err = of_property_read_u32(sysid_parent, "systemid", &sysid);
if (!err && ((sysid & GRLIB_VERSION_MASK) >=
GRCAN_TXBUG_SAFE_GRLIB_VERSION))
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 45ad1b3f0cd0..fc2afab36279 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_M_CAN
tristate "Bosch M_CAN support"
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Bosch M_CAN controller framework.
This is common support for devices that embed the Bosch M_CAN IP.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 5d0c82d8b9a9..afaaeb610c00 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -529,7 +529,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
/* acknowledge rx fifo 0 */
m_can_write(cdev, M_CAN_RXF0A, fgi);
- timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
m_can_receive_skb(cdev, skb, timestamp);
@@ -1030,7 +1030,7 @@ static int m_can_echo_tx_event(struct net_device *dev)
}
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
- timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
+ timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
/* ack txe element */
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
@@ -1348,10 +1348,12 @@ static void m_can_chip_config(struct net_device *dev)
/* set bittiming params */
m_can_set_bittiming(dev);
- /* enable internal timestamp generation, with a prescalar of 16. The
- * prescalar is applied to the nominal bit timing
+ /* enable internal timestamp generation, with a prescaler of 16. The
+ * prescaler is applied to the nominal bit timing
*/
- m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
+ m_can_write(cdev, M_CAN_TSCC,
+ FIELD_PREP(TSCC_TCP_MASK, 0xf) |
+ FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
m_can_config_endisable(cdev, false);
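
FIELD_PREP() from <linux/bitfield.h> shifts a value into the bit positions named by the mask, so the TSCC write above composes both fields into a single register image. A hedged illustration with made-up masks (not the real M_CAN layout):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_TCP_MASK	GENMASK(19, 16)
#define DEMO_TSS_MASK	GENMASK(1, 0)

static inline u32 demo_tscc(u32 prescaler_minus_one, u32 source)
{
	/* e.g. (0xf, 1) -> 0x000f0001 */
	return FIELD_PREP(DEMO_TCP_MASK, prescaler_minus_one) |
	       FIELD_PREP(DEMO_TSS_MASK, source);
}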
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 40a11445d021..ba42cef10a53 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1332,7 +1332,10 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ if (is_v3u(gpriv))
+ rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg);
+ else
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
diff --git a/drivers/net/can/slcan/Makefile b/drivers/net/can/slcan/Makefile
new file mode 100644
index 000000000000..8a88e484ee21
--- /dev/null
+++ b/drivers/net/can/slcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_SLCAN) += slcan.o
+
+slcan-objs :=
+slcan-objs += slcan-core.o
+slcan-objs += slcan-ethtool.o
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan/slcan-core.c
index 64a3aee8a7da..54d29a410ad5 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -54,8 +54,10 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/can.h>
+#include <linux/can/dev.h>
#include <linux/can/skb.h>
-#include <linux/can/can-ml.h>
+
+#include "slcan.h"
MODULE_ALIAS_LDISC(N_SLCAN);
MODULE_DESCRIPTION("serial line CAN interface");
@@ -76,8 +78,13 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
#define SLC_CMD_LEN 1
#define SLC_SFF_ID_LEN 3
#define SLC_EFF_ID_LEN 8
-
+#define SLC_STATE_LEN 1
+#define SLC_STATE_BE_RXCNT_LEN 3
+#define SLC_STATE_BE_TXCNT_LEN 3
+#define SLC_STATE_FRAME_LEN (1 + SLC_CMD_LEN + SLC_STATE_BE_RXCNT_LEN + \
+ SLC_STATE_BE_TXCNT_LEN)
struct slcan {
+ struct can_priv can;
int magic;
/* Various fields. */
@@ -96,10 +103,42 @@ struct slcan {
unsigned long flags; /* Flag values/ mode etc */
#define SLF_INUSE 0 /* Channel in use */
#define SLF_ERROR 1 /* Parity, etc. error */
+#define SLF_XCMD 2 /* Command transmission */
+ unsigned long cmd_flags; /* Command flags */
+#define CF_ERR_RST 0 /* Reset errors on open */
+ wait_queue_head_t xcmd_wait; /* Wait queue for commands */
+ /* transmission */
};
static struct net_device **slcan_devs;
+static const u32 slcan_bitrate_const[] = {
+ 10000, 20000, 50000, 100000, 125000,
+ 250000, 500000, 800000, 1000000
+};
+
+bool slcan_err_rst_on_open(struct net_device *ndev)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ return !!test_bit(CF_ERR_RST, &sl->cmd_flags);
+}
+
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (on)
+ set_bit(CF_ERR_RST, &sl->cmd_flags);
+ else
+ clear_bit(CF_ERR_RST, &sl->cmd_flags);
+
+ return 0;
+}
+
/************************************************************************
* SLCAN ENCAPSULATION FORMAT *
************************************************************************/
@@ -140,88 +179,289 @@ static struct net_device **slcan_devs;
************************************************************************/
/* Send one completely decapsulated can_frame to the network layer */
-static void slc_bump(struct slcan *sl)
+static void slc_bump_frame(struct slcan *sl)
{
struct sk_buff *skb;
- struct can_frame cf;
+ struct can_frame *cf;
int i, tmp;
u32 tmpid;
char *cmd = sl->rbuff;
- memset(&cf, 0, sizeof(cf));
+ skb = alloc_can_skb(sl->dev, &cf);
+ if (unlikely(!skb)) {
+ sl->dev->stats.rx_dropped++;
+ return;
+ }
switch (*cmd) {
case 'r':
- cf.can_id = CAN_RTR_FLAG;
+ cf->can_id = CAN_RTR_FLAG;
fallthrough;
case 't':
/* store dlc ASCII value and terminate SFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
+ cf->len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
break;
case 'R':
- cf.can_id = CAN_RTR_FLAG;
+ cf->can_id = CAN_RTR_FLAG;
fallthrough;
case 'T':
- cf.can_id |= CAN_EFF_FLAG;
+ cf->can_id |= CAN_EFF_FLAG;
/* store dlc ASCII value and terminate EFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
+ cf->len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
break;
default:
- return;
+ goto decode_failed;
}
if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
- return;
+ goto decode_failed;
- cf.can_id |= tmpid;
+ cf->can_id |= tmpid;
/* get len from sanitized ASCII value */
- if (cf.len >= '0' && cf.len < '9')
- cf.len -= '0';
+ if (cf->len >= '0' && cf->len < '9')
+ cf->len -= '0';
else
- return;
+ goto decode_failed;
/* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf.can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf.len; i++) {
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++) {
tmp = hex_to_bin(*cmd++);
if (tmp < 0)
- return;
- cf.data[i] = (tmp << 4);
+ goto decode_failed;
+
+ cf->data[i] = (tmp << 4);
tmp = hex_to_bin(*cmd++);
if (tmp < 0)
- return;
- cf.data[i] |= tmp;
+ goto decode_failed;
+
+ cf->data[i] |= tmp;
}
}
- skb = dev_alloc_skb(sizeof(struct can_frame) +
- sizeof(struct can_skb_priv));
- if (!skb)
+ sl->dev->stats.rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ sl->dev->stats.rx_bytes += cf->len;
+
+ netif_rx(skb);
+ return;
+
+decode_failed:
+ sl->dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+}
+
+/* A change state frame must contain state info and receive and transmit
+ * error counters.
+ *
+ * Examples:
+ *
+ * sb256256 : state bus-off: rx counter 256, tx counter 256
+ * sa057033 : state active, rx counter 57, tx counter 33
+ */
+static void slc_bump_state(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ u32 rxerr, txerr;
+ enum can_state state, rx_state, tx_state;
+
+ switch (cmd[1]) {
+ case 'a':
+ state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ case 'w':
+ state = CAN_STATE_ERROR_WARNING;
+ break;
+ case 'p':
+ state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ case 'b':
+ state = CAN_STATE_BUS_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (state == sl->can.state || sl->rcount < SLC_STATE_FRAME_LEN)
return;
- skb->dev = sl->dev;
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ cmd += SLC_STATE_BE_RXCNT_LEN + SLC_CMD_LEN + 1;
+ cmd[SLC_STATE_BE_TXCNT_LEN] = 0;
+ if (kstrtou32(cmd, 10, &txerr))
+ return;
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = sl->dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
+ *cmd = 0;
+ cmd -= SLC_STATE_BE_RXCNT_LEN;
+ if (kstrtou32(cmd, 10, &rxerr))
+ return;
- skb_put_data(skb, &cf, sizeof(struct can_frame));
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ } else {
+ cf = NULL;
+ }
- sl->dev->stats.rx_packets++;
- if (!(cf.can_id & CAN_RTR_FLAG))
- sl->dev->stats.rx_bytes += cf.len;
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
+ can_change_state(dev, cf, tx_state, rx_state);
- netif_rx(skb);
+ if (state == CAN_STATE_BUS_OFF)
+ can_bus_off(dev);
+
+ if (skb)
+ netif_rx(skb);
+}
+
+/* An error frame can contain more than one type of error.
+ *
+ * Examples:
+ *
+ * e1a : len 1, errors: ACK error
+ * e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error
+ */
+static void slc_bump_err(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ bool rx_errors = false, tx_errors = false, rx_over_errors = false;
+ int i, len;
+
+ /* get len from sanitized ASCII value */
+ len = cmd[1];
+ if (len >= '0' && len < '9')
+ len -= '0';
+ else
+ return;
+
+ if ((len + SLC_CMD_LEN + 1) > sl->rcount)
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ cmd += SLC_CMD_LEN + 1;
+ for (i = 0; i < len; i++, cmd++) {
+ switch (*cmd) {
+ case 'a':
+ netdev_dbg(dev, "ACK error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+
+ break;
+ case 'b':
+ netdev_dbg(dev, "Bit0 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+ break;
+ case 'B':
+ netdev_dbg(dev, "Bit1 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+ break;
+ case 'c':
+ netdev_dbg(dev, "CRC error\n");
+ rx_errors = true;
+ if (skb) {
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+
+ break;
+ case 'f':
+ netdev_dbg(dev, "Form Error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+
+ break;
+ case 'o':
+ netdev_dbg(dev, "Rx overrun error\n");
+ rx_over_errors = true;
+ rx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ break;
+ case 'O':
+ netdev_dbg(dev, "Tx overrun error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW;
+ }
+
+ break;
+ case 's':
+ netdev_dbg(dev, "Stuff error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+ break;
+ default:
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return;
+ }
+ }
+
+ if (rx_errors)
+ dev->stats.rx_errors++;
+
+ if (rx_over_errors)
+ dev->stats.rx_over_errors++;
+
+ if (tx_errors)
+ dev->stats.tx_errors++;
+
+ if (skb)
+ netif_rx(skb);
+}
+
+static void slc_bump(struct slcan *sl)
+{
+ switch (sl->rbuff[0]) {
+ case 'r':
+ fallthrough;
+ case 't':
+ fallthrough;
+ case 'R':
+ fallthrough;
+ case 'T':
+ return slc_bump_frame(sl);
+ case 'e':
+ return slc_bump_err(sl);
+ case 's':
+ return slc_bump_state(sl);
+ default:
+ return;
+ }
}
/* parse tty input stream */
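
A worked example of the dispatch above (editor's sketch): the line "t1234DEADBEEF" is an SFF data frame with CAN ID 0x123, DLC 4 and payload DE AD BE EF, decoded standalone here the same way slc_bump_frame() does:

#include <stdio.h>

int main(void)
{
	const char *s = "t1234DEADBEEF";
	unsigned int id, byte, len = s[4] - '0';

	sscanf(s + 1, "%3x", &id);		/* id = 0x123 */
	printf("id 0x%03x, dlc %u, data:", id, len);
	for (unsigned int i = 0; i < len; i++) {
		sscanf(s + 5 + 2 * i, "%2x", &byte);
		printf(" %02X", byte);		/* DE AD BE EF */
	}
	printf("\n");
	return 0;
}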
@@ -318,12 +558,22 @@ static void slcan_transmit(struct work_struct *work)
spin_lock_bh(&sl->lock);
/* First make sure we're connected. */
- if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
+ if (!sl->tty || sl->magic != SLCAN_MAGIC ||
+ (unlikely(!netif_running(sl->dev)) &&
+ likely(!test_bit(SLF_XCMD, &sl->flags)))) {
spin_unlock_bh(&sl->lock);
return;
}
if (sl->xleft <= 0) {
+ if (unlikely(test_bit(SLF_XCMD, &sl->flags))) {
+ clear_bit(SLF_XCMD, &sl->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ wake_up(&sl->xcmd_wait);
+ return;
+ }
+
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
@@ -365,7 +615,7 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&sl->lock);
if (!netif_running(dev)) {
spin_unlock(&sl->lock);
- printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
+ netdev_warn(dev, "xmit: iface is down\n");
goto out;
}
if (sl->tty == NULL) {
@@ -387,17 +637,63 @@ out:
* Routines looking at netdevice side.
******************************************/
+static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd)
+{
+ int ret, actual, n;
+
+ spin_lock(&sl->lock);
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ return -ENODEV;
+ }
+
+ n = snprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd);
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, n);
+ sl->xleft = n - actual;
+ sl->xhead = sl->xbuff + actual;
+ set_bit(SLF_XCMD, &sl->flags);
+ spin_unlock(&sl->lock);
+ ret = wait_event_interruptible_timeout(sl->xcmd_wait,
+ !test_bit(SLF_XCMD, &sl->flags),
+ HZ);
+ clear_bit(SLF_XCMD, &sl->flags);
+ if (ret == -ERESTARTSYS)
+ return ret;
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
/* Netdevice UP -> DOWN routine */
static int slc_close(struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
+ int err;
spin_lock_bh(&sl->lock);
if (sl->tty) {
+ if (sl->can.bittiming.bitrate &&
+ sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ spin_unlock_bh(&sl->lock);
+ err = slcan_transmit_cmd(sl, "C\r");
+ spin_lock_bh(&sl->lock);
+ if (err)
+ netdev_warn(dev,
+ "failed to send close command 'C\\r'\n");
+ }
+
/* TTY discipline is running. */
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
}
netif_stop_queue(dev);
+ close_candev(dev);
+ sl->can.state = CAN_STATE_STOPPED;
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNSET;
+
sl->rcount = 0;
sl->xleft = 0;
spin_unlock_bh(&sl->lock);
@@ -409,20 +705,77 @@ static int slc_close(struct net_device *dev)
static int slc_open(struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
+ unsigned char cmd[SLC_MTU];
+ int err, s;
if (sl->tty == NULL)
return -ENODEV;
- sl->flags &= (1 << SLF_INUSE);
+ /* The baud rate is not set with the command
+ * `ip link set <iface> type can bitrate <baud>' and therefore
+ * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing
+	 * open_candev() to fail. So let's set it to a fake value.
+ */
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN;
+
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ sl->flags &= BIT(SLF_INUSE);
+
+ if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) {
+ if (sl->can.bittiming.bitrate == slcan_bitrate_const[s])
+ break;
+ }
+
+		/* The CAN framework has already validated the bitrate value,
+		 * so we can avoid checking whether `s' has been properly set.
+ */
+
+ snprintf(cmd, sizeof(cmd), "C\rS%d\r", s);
+ err = slcan_transmit_cmd(sl, cmd);
+ if (err) {
+ netdev_err(dev,
+ "failed to send bitrate command 'C\\rS%d\\r'\n",
+ s);
+ goto cmd_transmit_failed;
+ }
+
+ if (test_bit(CF_ERR_RST, &sl->cmd_flags)) {
+ err = slcan_transmit_cmd(sl, "F\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send error command 'F\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+
+ err = slcan_transmit_cmd(sl, "O\r");
+ if (err) {
+ netdev_err(dev, "failed to send open command 'O\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+
+ sl->can.state = CAN_STATE_ERROR_ACTIVE;
netif_start_queue(dev);
return 0;
+
+cmd_transmit_failed:
+ close_candev(dev);
+ return err;
}
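
A worked example of the command sequence (editor's sketch): 500000 bit/s sits at index 6 of slcan_bitrate_const[], so opening the device emits "C\rS6\r", optionally "F\r" when err-rst-on-open is set, then "O\r":

#include <stdio.h>

static const unsigned int bitrates[] = {
	10000, 20000, 50000, 100000, 125000,
	250000, 500000, 800000, 1000000
};

int main(void)
{
	unsigned int want = 500000, s;

	for (s = 0; s < sizeof(bitrates) / sizeof(bitrates[0]); s++)
		if (bitrates[s] == want)
			break;
	printf("C\\rS%u\\r ... O\\r\n", s);	/* C\rS6\r ... O\r */
	return 0;
}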
-/* Hook the destructor so we can free slcan devs at the right point in time */
-static void slc_free_netdev(struct net_device *dev)
+static void slc_dealloc(struct slcan *sl)
{
- int i = dev->base_addr;
+ int i = sl->dev->base_addr;
+ free_candev(sl->dev);
slcan_devs[i] = NULL;
}
@@ -438,24 +791,6 @@ static const struct net_device_ops slc_netdev_ops = {
.ndo_change_mtu = slcan_change_mtu,
};
-static void slc_setup(struct net_device *dev)
-{
- dev->netdev_ops = &slc_netdev_ops;
- dev->needs_free_netdev = true;
- dev->priv_destructor = slc_free_netdev;
-
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- dev->mtu = CAN_MTU;
- dev->type = ARPHRD_CAN;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
/******************************************
Routines looking at TTY side.
******************************************/
@@ -518,11 +853,8 @@ static void slc_sync(void)
static struct slcan *slc_alloc(void)
{
int i;
- char name[IFNAMSIZ];
struct net_device *dev = NULL;
- struct can_ml_priv *can_ml;
struct slcan *sl;
- int size;
for (i = 0; i < maxdev; i++) {
dev = slcan_devs[i];
@@ -535,22 +867,24 @@ static struct slcan *slc_alloc(void)
if (i >= maxdev)
return NULL;
- sprintf(name, "slcan%d", i);
- size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv);
- dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup);
+ dev = alloc_candev(sizeof(*sl), 1);
if (!dev)
return NULL;
+ snprintf(dev->name, sizeof(dev->name), "slcan%d", i);
+ dev->netdev_ops = &slc_netdev_ops;
dev->base_addr = i;
+ slcan_set_ethtool_ops(dev);
sl = netdev_priv(dev);
- can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
- can_set_ml_priv(dev, can_ml);
/* Initialize channel control data */
sl->magic = SLCAN_MAGIC;
sl->dev = dev;
+ sl->can.bitrate_const = slcan_bitrate_const;
+ sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const);
spin_lock_init(&sl->lock);
INIT_WORK(&sl->tx_work, slcan_transmit);
+ init_waitqueue_head(&sl->xcmd_wait);
slcan_devs[i] = dev;
return sl;
@@ -609,26 +943,28 @@ static int slcan_open(struct tty_struct *tty)
set_bit(SLF_INUSE, &sl->flags);
- err = register_netdevice(sl->dev);
- if (err)
+ rtnl_unlock();
+ err = register_candev(sl->dev);
+ if (err) {
+ pr_err("slcan: can't register candev\n");
goto err_free_chan;
+ }
+ } else {
+ rtnl_unlock();
}
- /* Done. We have linked the TTY line to a channel. */
- rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
/* TTY layer expects 0 on success */
return 0;
err_free_chan:
+ rtnl_lock();
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
- slc_free_netdev(sl->dev);
- /* do not call free_netdev before rtnl_unlock */
+ slc_dealloc(sl);
rtnl_unlock();
- free_netdev(sl->dev);
return err;
err_exit:
@@ -662,9 +998,11 @@ static void slcan_close(struct tty_struct *tty)
synchronize_rcu();
flush_work(&sl->tx_work);
- /* Flush network side */
- unregister_netdev(sl->dev);
- /* This will complete via sl_free_netdev */
+ slc_close(sl->dev);
+ unregister_candev(sl->dev);
+ rtnl_lock();
+ slc_dealloc(sl);
+ rtnl_unlock();
}
static void slcan_hangup(struct tty_struct *tty)
@@ -772,15 +1110,15 @@ static void __exit slcan_exit(void)
dev = slcan_devs[i];
if (!dev)
continue;
- slcan_devs[i] = NULL;
sl = netdev_priv(dev);
if (sl->tty) {
- printk(KERN_ERR "%s: tty discipline still running\n",
- dev->name);
+ netdev_err(dev, "tty discipline still running\n");
}
- unregister_netdev(dev);
+ slc_close(dev);
+ unregister_candev(dev);
+ slc_dealloc(sl);
}
kfree(slcan_devs);
diff --git a/drivers/net/can/slcan/slcan-ethtool.c b/drivers/net/can/slcan/slcan-ethtool.c
new file mode 100644
index 000000000000..bf0afdc4e49d
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-ethtool.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "slcan.h"
+
+static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0)
+ "err-rst-on-open",
+};
+
+static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, slcan_priv_flags_strings,
+ sizeof(slcan_priv_flags_strings));
+ }
+}
+
+static u32 slcan_get_priv_flags(struct net_device *ndev)
+{
+ u32 flags = 0;
+
+ if (slcan_err_rst_on_open(ndev))
+ flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN;
+
+ return flags;
+}
+
+static int slcan_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+	bool err_rst_on_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN);
+
+	return slcan_enable_err_rst_on_open(ndev, err_rst_on_open);
+}
+
+static int slcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(slcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops slcan_ethtool_ops = {
+ .get_strings = slcan_get_strings,
+ .get_priv_flags = slcan_get_priv_flags,
+ .set_priv_flags = slcan_set_priv_flags,
+ .get_sset_count = slcan_get_sset_count,
+};
+
+void slcan_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &slcan_ethtool_ops;
+}
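
From userspace the flag is toggled with `ethtool --set-priv-flags can0 err-rst-on-open on`; a hedged C equivalent via the SIOCETHTOOL ioctl (interface name illustrative, bit 0 as defined above):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_SPFLAGS, .data = 1 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;
	return ioctl(fd, SIOCETHTOOL, &ifr) ? 1 : 0;
}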
diff --git a/drivers/net/can/slcan/slcan.h b/drivers/net/can/slcan/slcan.h
new file mode 100644
index 000000000000..d463c8d99e22
--- /dev/null
+++ b/drivers/net/can/slcan/slcan.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * slcan.h - serial line CAN interface driver
+ *
+ * Copyright (C) Laurence Culhane <loz@holmes.demon.co.uk>
+ * Copyright (C) Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * Copyright (C) Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#ifndef _SLCAN_H
+#define _SLCAN_H
+
+bool slcan_err_rst_on_open(struct net_device *ndev);
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on);
+void slcan_set_ethtool_ops(struct net_device *ndev);
+
+#endif /* _SLCAN_H */
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index dd0fc0a54be1..877e4356010d 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -2,6 +2,7 @@
config CAN_MCP251XFD
tristate "Microchip MCP251xFD SPI CAN controllers"
+ select CAN_RX_OFFLOAD
select REGMAP
select WANT_DEV_COREDUMP
help
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index b21252390216..9b47b07162fe 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -12,6 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
+#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -1650,6 +1651,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
netif_stop_queue(ndev);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
hrtimer_cancel(&priv->rx_irq_timer);
+ hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
can_rx_offload_disable(&priv->offload);
@@ -1777,7 +1779,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
xfer[0].len = sizeof(buf_tx->cmd);
xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
xfer[1].rx_buf = buf_rx->data;
- xfer[1].len = sizeof(dev_id);
+ xfer[1].len = sizeof(*dev_id);
xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
@@ -1786,7 +1788,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
if (err)
goto out_kfree_buf_tx;
- *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
+ *dev_id = get_unaligned_le32(buf_rx->data);
*effective_speed_hz_slow = xfer[0].effective_speed_hz;
*effective_speed_hz_fast = xfer[1].effective_speed_hz;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 217510c12af5..92b7bc7f14b9 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -334,19 +334,21 @@ mcp251xfd_regmap_crc_read(void *context,
* register. It increments once per SYS clock tick,
* which is 20 or 40 MHz.
*
- * Observation shows that if the lowest byte (which is
- * transferred first on the SPI bus) of that register
- * is 0x00 or 0x80 the calculated CRC doesn't always
- * match the transferred one.
+ * Observation on the mcp2518fd shows that if the
+ * lowest byte (which is transferred first on the SPI
+ * bus) of that register is 0x00 or 0x80 the
+ * calculated CRC doesn't always match the transferred
+ * one. On the mcp2517fd this problem is not limited
+ * to the first byte being 0x00 or 0x80.
*
* If the highest bit in the lowest byte is flipped
* the transferred CRC matches the calculated one. We
- * assume for now the CRC calculation in the chip
- * works on wrong data and the transferred data is
- * correct.
+ * assume for now the CRC operates on the correct
+ * data.
*/
if (reg == MCP251XFD_REG_TBC &&
- (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) {
+ ((buf_rx->data[0] & 0xf8) == 0x0 ||
+ (buf_rx->data[0] & 0xf8) == 0x80)) {
/* Flip highest bit in lowest byte of le32 */
buf_rx->data[0] ^= 0x80;
@@ -356,10 +358,8 @@ mcp251xfd_regmap_crc_read(void *context,
val_len);
if (!err) {
/* If CRC is now correct, assume
- * transferred data was OK, flip bit
- * back to original value.
+ * flipped data is OK.
*/
- buf_rx->data[0] ^= 0x80;
goto out;
}
}
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index f959215c9d53..1218f9642f33 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -14,11 +14,18 @@ config CAN_EMS_USB
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-config CAN_ESD_USB2
- tristate "ESD USB/2 CAN/USB interface"
+config CAN_ESD_USB
+ tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver supports the CAN-USB/2 interface
- from esd electronic system design gmbh (http://www.esd.eu).
+	  This driver adds support for several CAN/USB interfaces
+ from esd electronics gmbh (https://www.esd.eu).
+
+	  The driver supports the following devices:
+ - esd CAN-USB/2
+ - esd CAN-USB/Micro
+
+ To compile this driver as a module, choose M here: the module
+ will be called esd_usb.
config CAN_ETAS_ES58X
tristate "ETAS ES58X CAN/USB interfaces"
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 748cf31a0d53..1ea16be5743b 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
-obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_ESD_USB) += esd_usb.o
obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb.c
index 286daaaea0b8..8a4bf2961f3d 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
+ * CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro
*
- * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
+ * Copyright (C) 2022 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
#include <linux/signal.h>
#include <linux/slab.h>
@@ -14,20 +15,24 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
+MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>");
+MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro interfaces");
MODULE_LICENSE("GPL v2");
-/* Define these values to match your devices */
+/* USB vendor and product IDs */
#define USB_ESDGMBH_VENDOR_ID 0x0ab4
#define USB_CANUSB2_PRODUCT_ID 0x0010
#define USB_CANUSBM_PRODUCT_ID 0x0011
+/* CAN controller clock frequencies */
#define ESD_USB2_CAN_CLOCK 60000000
#define ESD_USBM_CAN_CLOCK 36000000
-#define ESD_USB2_MAX_NETS 2
-/* USB2 commands */
+/* Maximum number of CAN nets */
+#define ESD_USB_MAX_NETS 2
+
+/* USB commands */
#define CMD_VERSION 1 /* also used for VERSION_REPLY */
#define CMD_CAN_RX 2 /* device to host only */
#define CMD_CAN_TX 3 /* also used for TX_DONE */
@@ -43,13 +48,15 @@ MODULE_LICENSE("GPL v2");
#define ESD_EVENT 0x40000000
#define ESD_IDMASK 0x1fffffff
-/* esd CAN event ids used by this driver */
-#define ESD_EV_CAN_ERROR_EXT 2
+/* esd CAN event ids */
+#define ESD_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */
/* baudrate message flags */
-#define ESD_USB2_UBR 0x80000000
-#define ESD_USB2_LOM 0x40000000
-#define ESD_USB2_NO_BAUDRATE 0x7fffffff
+#define ESD_USB_UBR 0x80000000
+#define ESD_USB_LOM 0x40000000
+#define ESD_USB_NO_BAUDRATE 0x7fffffff
+
+/* bit timing CAN-USB/2 */
#define ESD_USB2_TSEG1_MIN 1
#define ESD_USB2_TSEG1_MAX 16
#define ESD_USB2_TSEG1_SHIFT 16
@@ -68,7 +75,7 @@ MODULE_LICENSE("GPL v2");
#define ESD_ID_ENABLE 0x80
#define ESD_MAX_ID_SEGMENT 64
-/* SJA1000 ECC register (emulated by usb2 firmware) */
+/* SJA1000 ECC register (emulated by usb firmware) */
#define SJA1000_ECC_SEG 0x1F
#define SJA1000_ECC_DIR 0x20
#define SJA1000_ECC_ERR 0x06
@@ -158,7 +165,7 @@ struct set_baudrate_msg {
};
/* Main message type used between library and application */
-struct __attribute__ ((packed)) esd_usb2_msg {
+struct __packed esd_usb_msg {
union {
struct header_msg hdr;
struct version_msg version;
@@ -171,23 +178,23 @@ struct __attribute__ ((packed)) esd_usb2_msg {
} msg;
};
-static struct usb_device_id esd_usb2_table[] = {
+static struct usb_device_id esd_usb_table[] = {
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
{}
};
-MODULE_DEVICE_TABLE(usb, esd_usb2_table);
+MODULE_DEVICE_TABLE(usb, esd_usb_table);
-struct esd_usb2_net_priv;
+struct esd_usb_net_priv;
struct esd_tx_urb_context {
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
u32 echo_index;
};
-struct esd_usb2 {
+struct esd_usb {
struct usb_device *udev;
- struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];
+ struct esd_usb_net_priv *nets[ESD_USB_MAX_NETS];
struct usb_anchor rx_submitted;
@@ -198,22 +205,22 @@ struct esd_usb2 {
dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
-struct esd_usb2_net_priv {
+struct esd_usb_net_priv {
struct can_priv can; /* must be the first member */
atomic_t active_tx_jobs;
struct usb_anchor tx_submitted;
struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
- struct esd_usb2 *usb2;
+ struct esd_usb *usb;
struct net_device *netdev;
int index;
u8 old_state;
struct can_berr_counter bec;
};
-static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
@@ -296,8 +303,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
}
}
-static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
@@ -311,7 +318,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
id = le32_to_cpu(msg->msg.rx.id);
if (id & ESD_EVENT) {
- esd_usb2_rx_event(priv, msg);
+ esd_usb_rx_event(priv, msg);
} else {
skb = alloc_can_skb(priv->netdev, &cf);
if (skb == NULL) {
@@ -338,12 +345,10 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
netif_rx(skb);
}
-
- return;
}
-static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct net_device *netdev = priv->netdev;
@@ -370,9 +375,9 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
netif_wake_queue(netdev);
}
-static void esd_usb2_read_bulk_callback(struct urb *urb)
+static void esd_usb_read_bulk_callback(struct urb *urb)
{
- struct esd_usb2 *dev = urb->context;
+ struct esd_usb *dev = urb->context;
int retval;
int pos = 0;
int i;
@@ -394,9 +399,9 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
}
while (pos < urb->actual_length) {
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
- msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);
+ msg = (struct esd_usb_msg *)(urb->transfer_buffer + pos);
switch (msg->msg.hdr.cmd) {
case CMD_CAN_RX:
@@ -405,7 +410,7 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;
}
- esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
+ esd_usb_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
break;
case CMD_CAN_TX:
@@ -414,8 +419,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;
}
- esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
- msg);
+ esd_usb_tx_done_msg(dev->nets[msg->msg.txdone.net],
+ msg);
break;
}
@@ -430,7 +435,7 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
urb->transfer_buffer, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval == -ENODEV) {
@@ -442,19 +447,15 @@ resubmit_urb:
dev_err(dev->udev->dev.parent,
"failed resubmitting read bulk urb: %d\n", retval);
}
-
- return;
}
-/*
- * callback for bulk IN urb
- */
-static void esd_usb2_write_bulk_callback(struct urb *urb)
+/* callback for bulk OUT urb */
+static void esd_usb_write_bulk_callback(struct urb *urb)
{
struct esd_tx_urb_context *context = urb->context;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
struct net_device *netdev;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(struct esd_usb_msg);
WARN_ON(!context);
@@ -478,7 +479,7 @@ static ssize_t firmware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 12) & 0xf,
@@ -491,7 +492,7 @@ static ssize_t hardware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 28) & 0xf,
@@ -504,13 +505,13 @@ static ssize_t nets_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d", dev->net_count);
}
static DEVICE_ATTR_RO(nets);
-static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
+static int esd_usb_send_msg(struct esd_usb *dev, struct esd_usb_msg *msg)
{
int actual_length;
@@ -522,8 +523,8 @@ static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
1000);
}
-static int esd_usb2_wait_msg(struct esd_usb2 *dev,
- struct esd_usb2_msg *msg)
+static int esd_usb_wait_msg(struct esd_usb *dev,
+ struct esd_usb_msg *msg)
{
int actual_length;
@@ -535,7 +536,7 @@ static int esd_usb2_wait_msg(struct esd_usb2 *dev,
1000);
}
-static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
{
int i, err = 0;
@@ -568,7 +569,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->rx_submitted);
@@ -606,14 +607,12 @@ freeurb:
return 0;
}
-/*
- * Start interface
- */
-static int esd_usb2_start(struct esd_usb2_net_priv *priv)
+/* Start interface */
+static int esd_usb_start(struct esd_usb_net_priv *priv)
{
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb *dev = priv->usb;
struct net_device *netdev = priv->netdev;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
int err, i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -622,8 +621,7 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
goto out;
}
- /*
- * Enable all IDs
+ /* Enable all IDs
 * The IDADD message takes up to 64 32-bit bitmasks (2048 bits).
 * Each bit represents one 11-bit CAN identifier. A set bit
* enables reception of the corresponding CAN identifier. A cleared
@@ -644,11 +642,11 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
 /* enable 29-bit extended IDs */
msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err)
goto out;
- err = esd_usb2_setup_rx_urbs(dev);
+ err = esd_usb_setup_rx_urbs(dev);
if (err)
goto out;
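
As a side note on the 2048-bit filter described above: each of the 64 32-bit mask words covers 32 consecutive 11-bit identifiers, and the extra word at index ESD_MAX_ID_SEGMENT gates 29-bit extended IDs. A standalone sketch of filling such a bitmap (endianness handling via cpu_to_le32() is omitted):

#include <stdint.h>
#include <string.h>

#define ESD_MAX_ID_SEGMENT 64

/* Enable reception of one 11-bit CAN identifier in the bitmap. */
static void idadd_enable_id(uint32_t mask[ESD_MAX_ID_SEGMENT + 1], uint16_t can_id)
{
	can_id &= 0x7ff;	/* 11-bit identifier space */
	mask[can_id / 32] |= UINT32_C(1) << (can_id % 32);
}

/* Enable all 11-bit IDs plus 29-bit extended IDs, as esd_usb_start() does. */
static void idadd_enable_all(uint32_t mask[ESD_MAX_ID_SEGMENT + 1])
{
	memset(mask, 0xff, ESD_MAX_ID_SEGMENT * sizeof(uint32_t));
	mask[ESD_MAX_ID_SEGMENT] = 0x00000001;
}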
@@ -664,9 +662,9 @@ out:
return err;
}
-static void unlink_all_urbs(struct esd_usb2 *dev)
+static void unlink_all_urbs(struct esd_usb *dev)
{
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
@@ -687,9 +685,9 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
}
}
-static int esd_usb2_open(struct net_device *netdev)
+static int esd_usb_open(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
int err;
/* common open */
@@ -698,7 +696,7 @@ static int esd_usb2_open(struct net_device *netdev)
return err;
/* finally start device */
- err = esd_usb2_start(priv);
+ err = esd_usb_start(priv);
if (err) {
netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
@@ -710,20 +708,20 @@ static int esd_usb2_open(struct net_device *netdev)
return 0;
}
-static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
+static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb *dev = priv->usb;
struct esd_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
struct urb *urb;
u8 *buf;
int i, err;
int ret = NETDEV_TX_OK;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(struct esd_usb_msg);
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
@@ -745,7 +743,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
goto nobufmem;
}
- msg = (struct esd_usb2_msg *)buf;
+ msg = (struct esd_usb_msg *)buf;
msg->msg.hdr.len = 3; /* minimal length */
msg->msg.hdr.cmd = CMD_CAN_TX;
@@ -771,9 +769,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
}
}
- /*
- * This may never happen.
- */
+	/* This should never happen */
if (!context) {
netdev_warn(netdev, "couldn't find free context\n");
ret = NETDEV_TX_BUSY;
@@ -788,7 +784,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
msg->msg.hdr.len << 2,
- esd_usb2_write_bulk_callback, context);
+ esd_usb_write_bulk_callback, context);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -821,8 +817,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
netif_trans_update(netdev);
- /*
- * Release our reference to this URB, the USB core will eventually free
+ /* Release our reference to this URB, the USB core will eventually free
* it entirely.
*/
usb_free_urb(urb);
@@ -839,24 +834,24 @@ nourbmem:
return ret;
}
-static int esd_usb2_close(struct net_device *netdev)
+static int esd_usb_close(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2_msg *msg;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_msg *msg;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
- /* Disable all IDs (see esd_usb2_start()) */
+ /* Disable all IDs (see esd_usb_start()) */
msg->msg.hdr.cmd = CMD_IDADD;
msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
msg->msg.filter.net = priv->index;
msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
msg->msg.filter.mask[i] = 0;
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending idadd message failed\n");
/* set CAN controller to reset mode */
@@ -864,8 +859,8 @@ static int esd_usb2_close(struct net_device *netdev)
msg->msg.hdr.cmd = CMD_SETBAUD;
msg->msg.setbaud.net = priv->index;
msg->msg.setbaud.rsvd = 0;
- msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ msg->msg.setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending setbaud message failed\n");
priv->can.state = CAN_STATE_STOPPED;
@@ -879,10 +874,10 @@ static int esd_usb2_close(struct net_device *netdev)
return 0;
}
-static const struct net_device_ops esd_usb2_netdev_ops = {
- .ndo_open = esd_usb2_open,
- .ndo_stop = esd_usb2_close,
- .ndo_start_xmit = esd_usb2_start_xmit,
+static const struct net_device_ops esd_usb_netdev_ops = {
+ .ndo_open = esd_usb_open,
+ .ndo_stop = esd_usb_close,
+ .ndo_start_xmit = esd_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
@@ -900,20 +895,20 @@ static const struct can_bittiming_const esd_usb2_bittiming_const = {
static int esd_usb2_set_bittiming(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
int err;
u32 canbtr;
int sjw_shift;
- canbtr = ESD_USB2_UBR;
+ canbtr = ESD_USB_UBR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- canbtr |= ESD_USB2_LOM;
+ canbtr |= ESD_USB_LOM;
canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
- if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
+ if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID)
sjw_shift = ESD_USBM_SJW_SHIFT;
else
@@ -941,16 +936,16 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
netdev_info(netdev, "setting BTR=%#x\n", canbtr);
- err = esd_usb2_send_msg(priv->usb2, msg);
+ err = esd_usb_send_msg(priv->usb, msg);
kfree(msg);
return err;
}
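
The BTR word sent by esd_usb_set_bittiming() is assembled from individual timing fields. A standalone sketch of that composition; only TSEG1_SHIFT (16) appears in the hunks above, so the other shifts and field widths here are illustrative assumptions:

#include <stdint.h>

#define UBR		0x80000000u	/* ESD_USB_UBR: use these bit timings */
#define LOM		0x40000000u	/* ESD_USB_LOM: listen-only mode */
#define BRP_MAX		512u		/* assumed; masks brp into its field */
#define TSEG1_SHIFT	16		/* matches ESD_USB2_TSEG1_SHIFT */
#define TSEG2_SHIFT	20		/* assumed */
#define SJW_SHIFT	14		/* assumed; differs on CAN-USB/Micro */

static uint32_t compose_canbtr(uint32_t brp, uint32_t tseg1, uint32_t tseg2,
			       uint32_t sjw, int listen_only)
{
	uint32_t canbtr = UBR;

	if (listen_only)
		canbtr |= LOM;
	canbtr |= (brp - 1) & (BRP_MAX - 1);
	canbtr |= ((sjw - 1) & 0x3) << SJW_SHIFT;
	canbtr |= ((tseg1 - 1) & 0xf) << TSEG1_SHIFT;
	canbtr |= ((tseg2 - 1) & 0x7) << TSEG2_SHIFT;
	return canbtr;
}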
-static int esd_usb2_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
+static int esd_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
bec->txerr = priv->bec.txerr;
bec->rxerr = priv->bec.rxerr;
@@ -958,7 +953,7 @@ static int esd_usb2_get_berr_counter(const struct net_device *netdev,
return 0;
}
-static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
+static int esd_usb_set_mode(struct net_device *netdev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
@@ -972,11 +967,11 @@ static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
return 0;
}
-static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
+static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int err = 0;
int i;
@@ -995,7 +990,7 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
- priv->usb2 = dev;
+ priv->usb = dev;
priv->netdev = netdev;
priv->index = index;
@@ -1013,12 +1008,12 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
priv->can.bittiming_const = &esd_usb2_bittiming_const;
priv->can.do_set_bittiming = esd_usb2_set_bittiming;
- priv->can.do_set_mode = esd_usb2_set_mode;
- priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
+ priv->can.do_set_mode = esd_usb_set_mode;
+ priv->can.do_get_berr_counter = esd_usb_get_berr_counter;
netdev->flags |= IFF_ECHO; /* we support local echo */
- netdev->netdev_ops = &esd_usb2_netdev_ops;
+ netdev->netdev_ops = &esd_usb_netdev_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
netdev->dev_id = index;
@@ -1038,17 +1033,16 @@ done:
return err;
}
-/*
- * probe function for new USB2 devices
+/* probe function for new USB devices
*
* check version information and number of available
* CAN interfaces
*/
-static int esd_usb2_probe(struct usb_interface *intf,
+static int esd_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct esd_usb2 *dev;
- struct esd_usb2_msg *msg;
+ struct esd_usb *dev;
+ struct esd_usb_msg *msg;
int i, err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1076,13 +1070,13 @@ static int esd_usb2_probe(struct usb_interface *intf,
msg->msg.version.flags = 0;
msg->msg.version.drv_version = 0;
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "sending version message failed\n");
goto free_msg;
}
- err = esd_usb2_wait_msg(dev, msg);
+ err = esd_usb_wait_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "no version message answer\n");
goto free_msg;
@@ -1105,7 +1099,7 @@ static int esd_usb2_probe(struct usb_interface *intf,
/* do per device probing */
for (i = 0; i < dev->net_count; i++)
- esd_usb2_probe_one_net(intf, i);
+ esd_usb_probe_one_net(intf, i);
free_msg:
kfree(msg);
@@ -1115,12 +1109,10 @@ done:
return err;
}
-/*
- * called by the usb core when the device is removed from the system
- */
-static void esd_usb2_disconnect(struct usb_interface *intf)
+/* called by the usb core when the device is removed from the system */
+static void esd_usb_disconnect(struct usb_interface *intf)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
int i;
@@ -1144,11 +1136,11 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
}
/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver esd_usb2_driver = {
- .name = "esd_usb2",
- .probe = esd_usb2_probe,
- .disconnect = esd_usb2_disconnect,
- .id_table = esd_usb2_table,
+static struct usb_driver esd_usb_driver = {
+ .name = "esd_usb",
+ .probe = esd_usb_probe,
+ .disconnect = esd_usb_disconnect,
+ .id_table = esd_usb_table,
};
-module_usb_driver(esd_usb2_driver);
+module_usb_driver(esd_usb_driver);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 2d73ebbf3836..7353745f92d7 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -1707,7 +1707,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
{
const struct device *dev = es58x_dev->dev;
const struct es58x_parameters *param = es58x_dev->param;
- size_t rx_buf_len = es58x_dev->rx_max_packet_size;
+ u16 rx_buf_len = usb_maxpacket(es58x_dev->udev, es58x_dev->rx_pipe);
struct urb *urb;
u8 *buf;
int i;
@@ -1739,7 +1739,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_err(dev, "%s: Could not setup any rx URBs\n", __func__);
return ret;
}
- dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n",
+ dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);
return ret;
@@ -2223,7 +2223,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
ep_in->bEndpointAddress);
es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev,
ep_out->bEndpointAddress);
- es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
return es58x_dev;
}
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index e5033cb5e695..d769bdf740b7 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -380,7 +380,6 @@ struct es58x_operators {
* @timestamps: a temporary buffer to store the time stamps before
* feeding them to es58x_can_get_echo_skb(). Can only be used
* in RX branches.
- * @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
* @opened_channel_cnt: number of channels opened. Free of race
* conditions because its two users (net_device_ops:ndo_open()
@@ -401,8 +400,8 @@ struct es58x_device {
const struct es58x_parameters *param;
const struct es58x_operators *ops;
- int rx_pipe;
- int tx_pipe;
+ unsigned int rx_pipe;
+ unsigned int tx_pipe;
struct usb_anchor rx_urbs;
struct usb_anchor tx_urbs_busy;
@@ -414,7 +413,6 @@ struct es58x_device {
u64 timestamps[ES58X_ECHO_BULK_MAX];
- u16 rx_max_packet_size;
u8 num_can_ch;
u8 opened_channel_cnt;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b29ba9138866..d3a658b444b5 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -268,6 +268,8 @@ struct gs_can {
struct usb_anchor tx_submitted;
atomic_t active_tx_urbs;
+ void *rxbuf[GS_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[GS_MAX_RX_URBS];
};
/* usb interface struct */
@@ -742,6 +744,7 @@ static int gs_can_open(struct net_device *netdev)
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
+ dma_addr_t buf_dma;
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -752,7 +755,7 @@ static int gs_can_open(struct net_device *netdev)
buf = usb_alloc_coherent(dev->udev,
dev->parent->hf_size_rx,
GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -760,6 +763,8 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
}
+ urb->transfer_dma = buf_dma;
+
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
@@ -781,10 +786,17 @@ static int gs_can_open(struct net_device *netdev)
"usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+					  dev->parent->hf_size_rx,
+ buf,
+ buf_dma);
usb_free_urb(urb);
break;
}
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
/* Drop reference,
* USB core will take care of freeing it
*/
@@ -842,13 +854,20 @@ static int gs_can_close(struct net_device *netdev)
int rc;
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
+ unsigned int i;
netif_stop_queue(netdev);
/* Stop polling */
parent->active_channels--;
- if (!parent->active_channels)
+ if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
+ for (i = 0; i < GS_MAX_RX_URBS; i++)
+ usb_free_coherent(dev->udev,
+					  dev->parent->hf_size_rx,
+ dev->rxbuf[i],
+ dev->rxbuf_dma[i]);
+ }
/* Stop sending URBs */
usb_kill_anchored_urbs(&dev->tx_submitted);
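
The fix above follows a common USB driver pattern: every buffer from usb_alloc_coherent() must later be released with usb_free_coherent() using the same size and DMA handle, so the driver records both at allocation time. A condensed sketch of the pattern (kernel context assumed; declarations and error paths trimmed):

	/* open(): allocate, remember, and hand each buffer to an anchored URB */
	for (i = 0; i < GS_MAX_RX_URBS; i++) {
		dma_addr_t buf_dma;
		u8 *buf = usb_alloc_coherent(udev, buf_size, GFP_KERNEL, &buf_dma);

		if (!buf)
			return -ENOMEM;
		dev->rxbuf[i] = buf;		/* kept for close() */
		dev->rxbuf_dma[i] = buf_dma;
		/* ... usb_fill_bulk_urb(), usb_anchor_urb(), usb_submit_urb() ... */
	}

	/* close(): after usb_kill_anchored_urbs(), release what open() recorded */
	for (i = 0; i < GS_MAX_RX_URBS; i++)
		usb_free_coherent(udev, buf_size, dev->rxbuf[i], dev->rxbuf_dma[i]);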
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 3a49257f9fa6..eefcbe3aadce 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -35,9 +35,10 @@
#define KVASER_USB_RX_BUFFER_SIZE 3072
#define KVASER_USB_MAX_NET_DEVICES 5
-/* USB devices features */
-#define KVASER_USB_HAS_SILENT_MODE BIT(0)
-#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+/* Kvaser USB device quirks */
+#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
+#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -65,12 +66,7 @@ struct kvaser_usb_dev_card_data_hydra {
struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
- union {
- struct {
- enum kvaser_usb_leaf_family family;
- } leaf;
- struct kvaser_usb_dev_card_data_hydra hydra;
- };
+ struct kvaser_usb_dev_card_data_hydra hydra;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -83,7 +79,7 @@ struct kvaser_usb {
struct usb_device *udev;
struct usb_interface *intf;
struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
- const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_driver_info *driver_info;
const struct kvaser_usb_dev_cfg *cfg;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
@@ -165,6 +161,12 @@ struct kvaser_usb_dev_ops {
u16 transid);
};
+struct kvaser_usb_driver_info {
+ u32 quirks;
+ enum kvaser_usb_leaf_family family;
+ const struct kvaser_usb_dev_ops *ops;
+};
+
struct kvaser_usb_dev_cfg {
const struct can_clock clock;
const unsigned int timestamp_freq;
@@ -184,4 +186,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
int len);
int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+
+extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+
#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index e67658b53d02..f211bfcb1d97 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -61,8 +61,6 @@
#define USB_USBCAN_R_V2_PRODUCT_ID 294
#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295
#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296
-#define USB_LEAF_PRODUCT_ID_END \
- USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID
/* Kvaser USBCan-II devices product ids */
#define USB_USBCAN_REVB_PRODUCT_ID 2
@@ -89,116 +87,153 @@
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276
#define USB_HYBRID_CANLIN_PRODUCT_ID 277
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
-#define USB_HYDRA_PRODUCT_ID_END \
- USB_HYBRID_PRO_CANLIN_PRODUCT_ID
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
- (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
- id->idProduct <= USB_LEAF_PRODUCT_ID_END);
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
+ .quirks = 0,
+ .ops = &kvaser_usb_hydra_dev_ops,
+};
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE,
+ .family = KVASER_USBCAN,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
-static inline bool kvaser_is_hydra(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
- id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = {
+ .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ .quirks = 0,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ /* Leaf M32C USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+
+ /* Leaf i.MX28 USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
/* USBCANII USB product IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
/* Minihydra USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
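
The table above uses the standard driver_info pattern: a pointer to a per-model struct is cast to kernel_ulong_t in the match entry and cast back in probe(), replacing the old product-ID range checks. A trimmed sketch of the consumer side (kernel context assumed; error handling reduced):

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	const struct kvaser_usb_driver_info *driver_info;

	driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
	if (!driver_info)	/* table entry without driver_info */
		return -ENODEV;

	/* quirks, family, and ops now come from one struct instead of
	 * product-ID range checks like the removed kvaser_is_leaf() */
	if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
		dev_info(&intf->dev, "device supports silent mode\n");

	return 0;
}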
@@ -285,6 +320,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
struct kvaser_usb *dev = urb->context;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
unsigned int i;
@@ -301,8 +337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
- urb->actual_length);
+ ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev,
@@ -396,6 +432,7 @@ static int kvaser_usb_open(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
err = open_candev(netdev);
@@ -406,11 +443,11 @@ static int kvaser_usb_open(struct net_device *netdev)
if (err)
goto error;
- err = dev->ops->dev_set_opt_mode(priv);
+ err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
- err = dev->ops->dev_start_chip(priv);
+ err = ops->dev_start_chip(priv);
if (err) {
netdev_warn(netdev, "Cannot start device, error %d\n", err);
goto error;
@@ -467,22 +504,23 @@ static int kvaser_usb_close(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
netif_stop_queue(netdev);
- err = dev->ops->dev_flush_queue(priv);
+ err = ops->dev_flush_queue(priv);
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n",
err);
}
- err = dev->ops->dev_stop_chip(priv);
+ err = ops->dev_stop_chip(priv);
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
@@ -521,6 +559,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
struct net_device_stats *stats = &netdev->stats;
struct kvaser_usb_tx_urb_context *context = NULL;
struct urb *urb;
@@ -563,8 +602,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
goto freeurb;
}
- buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len,
- context->echo_index);
+ buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index);
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
@@ -648,15 +686,16 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
}
}
-static int kvaser_usb_init_one(struct kvaser_usb *dev,
- const struct usb_device_id *id, int channel)
+static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
{
struct net_device *netdev;
struct kvaser_usb_net_priv *priv;
+ const struct kvaser_usb_driver_info *driver_info = dev->driver_info;
+ const struct kvaser_usb_dev_ops *ops = driver_info->ops;
int err;
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, channel);
if (err)
return err;
}
@@ -685,20 +724,19 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
- priv->can.do_set_mode = dev->ops->dev_set_mode;
- if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ priv->can.do_set_bittiming = ops->dev_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
- priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
- if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.do_get_berr_counter = ops->dev_get_berr_counter;
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming =
- dev->ops->dev_set_data_bittiming;
+ priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
@@ -729,29 +767,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
struct kvaser_usb *dev;
int err;
int i;
+ const struct kvaser_usb_driver_info *driver_info;
+ const struct kvaser_usb_dev_ops *ops;
+
+ driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
+ if (!driver_info)
+ return -ENODEV;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- if (kvaser_is_leaf(id)) {
- dev->card_data.leaf.family = KVASER_LEAF;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_usbcan(id)) {
- dev->card_data.leaf.family = KVASER_USBCAN;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_hydra(id)) {
- dev->ops = &kvaser_usb_hydra_dev_ops;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) is not a supported Kvaser USB device\n",
- id->idProduct);
- return -ENODEV;
- }
-
dev->intf = intf;
+ dev->driver_info = driver_info;
+ ops = driver_info->ops;
- err = dev->ops->dev_setup_endpoints(dev);
+ err = ops->dev_setup_endpoints(dev);
if (err) {
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
return err;
@@ -765,22 +796,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
- err = dev->ops->dev_init_card(dev);
+ err = ops->dev_init_card(dev);
if (err) {
dev_err(&intf->dev,
"Failed to initialize card, error %d\n", err);
return err;
}
- err = dev->ops->dev_get_software_info(dev);
+ err = ops->dev_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_software_details) {
- err = dev->ops->dev_get_software_details(dev);
+ if (ops->dev_get_software_details) {
+ err = ops->dev_get_software_details(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software details, error %d\n", err);
@@ -798,14 +829,14 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
- err = dev->ops->dev_get_card_info(dev);
+ err = ops->dev_get_card_info(dev);
if (err) {
dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_capabilities) {
- err = dev->ops->dev_get_capabilities(dev);
+ if (ops->dev_get_capabilities) {
+ err = ops->dev_get_capabilities(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get capabilities, error %d\n", err);
@@ -815,7 +846,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
}
for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(dev, id, i);
+ err = kvaser_usb_init_one(dev, i);
if (err) {
kvaser_usb_remove_interfaces(dev);
return err;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index a26823c5b62a..5d70844ac030 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -375,7 +375,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.brp_inc = 1,
};
-static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = {
.name = "kvaser_usb_flex",
.tseg1_min = 4,
.tseg1_max = 16,
@@ -2052,7 +2052,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
.freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index c805b999c543..cc809ecd1e62 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -101,16 +101,6 @@
#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL 1
#define KVASER_CTRL_MODE_SILENT 2
@@ -343,48 +333,68 @@ struct kvaser_usb_err_summary {
};
};
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
+static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 16,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = {
+ .name = "kvaser_usb_leaf",
+ .tseg1_min = 3,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 2,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.clock = {
.freq = 8 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
static void *
@@ -404,7 +414,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
sizeof(struct kvaser_cmd_tx_can);
cmd->u.tx_can.channel = priv->channel;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
break;
@@ -524,16 +534,23 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
- switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
- case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
- break;
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+		/* Firmware expects bittiming parameters calculated for a
+		 * 16 MHz clock, regardless of the actual clock frequency
+ */
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ } else {
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz;
+ break;
+ }
}
}
@@ -550,7 +567,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
if (err)
return err;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
@@ -558,7 +575,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
- dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+ dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
break;
}
@@ -597,7 +614,7 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
dev->nchannels = cmd.u.cardinfo.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
- (dev->card_data.leaf.family == KVASER_USBCAN &&
+ (dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
@@ -730,7 +747,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
priv->can.can_stats.bus_error++;
@@ -809,7 +826,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
@@ -999,7 +1016,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->card_data.leaf.family == KVASER_LEAF &&
+ (dev->driver_info->family == KVASER_LEAF &&
cmd->id == CMD_LEAF_LOG_MESSAGE)) {
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
return;
@@ -1015,7 +1032,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
break;
@@ -1030,7 +1047,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ if (dev->driver_info->family == KVASER_LEAF && cmd->id ==
CMD_LEAF_LOG_MESSAGE) {
cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
@@ -1128,14 +1145,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
break;
case CMD_LEAF_LOG_MESSAGE:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
- if (dev->card_data.leaf.family == KVASER_LEAF)
+ if (dev->driver_info->family == KVASER_LEAF)
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
else
kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
@@ -1147,12 +1164,12 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->card_data.leaf.family != KVASER_USBCAN)
+ if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 8a3b7b103ca4..0de2f97d9f62 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
*
- * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2012 - 2022 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
@@ -9,6 +9,7 @@
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -50,7 +51,7 @@ enum xcan_reg {
/* only on CAN FD cores */
XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
- * Prescalar
+ * Prescaler
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
@@ -86,6 +87,8 @@ enum xcan_reg {
#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
+#define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
+#define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
@@ -99,6 +102,7 @@ enum xcan_reg {
#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
+#define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
@@ -132,6 +136,7 @@ enum xcan_reg {
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
@@ -258,7 +263,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
@@ -271,11 +276,31 @@ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
+/* Transmitter Delay Compensation constants for CANFD 1.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd = {
+	.tdcv_min = 0,
+	.tdcv_max = 0, /* Manual mode not supported. */
+	.tdco_min = 0,
+	.tdco_max = 32,
+	.tdcf_min = 0, /* Filter window not supported. */
+	.tdcf_max = 0,
+};
+
+/* Transmitter Delay Compensation constants for CANFD 2.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd2 = {
+	.tdcv_min = 0,
+	.tdcv_max = 0, /* Manual mode not supported. */
+	.tdco_min = 0,
+	.tdco_max = 64,
+	.tdcf_min = 0, /* Filter window not supported. */
+	.tdcf_max = 0,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -405,7 +430,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
return -EPERM;
}
- /* Setting Baud Rate prescalar value in BRPR Register */
+ /* Setting Baud Rate prescaler value in BRPR Register */
btr0 = (bt->brp - 1);
/* Setting Time Segment 1 in BTR Register */
@@ -422,8 +447,16 @@ static int xcan_set_bittiming(struct net_device *ndev)
if (priv->devtype.cantype == XAXI_CANFD ||
priv->devtype.cantype == XAXI_CANFD_2_0) {
- /* Setting Baud Rate prescalar value in F_BRPR Register */
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
+ if (can_tdc_is_enabled(&priv->can)) {
+ if (priv->devtype.cantype == XAXI_CANFD)
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ else
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ }
/* Setting Time Segment 1 in BTR Register */
btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
@@ -1483,6 +1516,22 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
+/**
+ * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
+ * @ndev: Pointer to net_device structure
+ * @tdcv: Pointer to TDCV value
+ *
+ * Return: 0 on success
+ */
+static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
+
+ return 0;
+}
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1735,17 +1784,24 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
- if (devtype->cantype == XAXI_CANFD)
+ if (devtype->cantype == XAXI_CANFD) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
+ priv->can.tdc_const = &xcan_tdc_const_canfd;
+ }
- if (devtype->cantype == XAXI_CANFD_2_0)
+ if (devtype->cantype == XAXI_CANFD_2_0) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
+ priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ }
if (devtype->cantype == XAXI_CANFD ||
- devtype->cantype == XAXI_CANFD_2_0)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ devtype->cantype == XAXI_CANFD_2_0) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_TDC_AUTO;
+ priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ }
priv->reg_base = addr;
priv->tx_max = tx_max;
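The TDC hunks above pack the transmitter delay compensation offset into the data-phase prescaler register using the <linux/bitfield.h> helpers. A self-contained sketch of that packing (mask values copied from the hunks, helper function name assumed):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define XCAN_2_BRPR_TDCO_MASK	GENMASK(13, 8)	/* TDCO for CANFD 2.0 */
	#define XCAN_BRPR_TDC_ENABLE	BIT(16)		/* TDC enable */

	/* FIELD_PREP() shifts tdco into bits 13:8 of the mask;
	 * FIELD_GET() extracts it again, as xcan_get_auto_tdcv()
	 * does for the status register.
	 */
	static u32 xcan_pack_f_brpr(u32 brp, u32 tdco)
	{
		return (brp - 1) | FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, tdco) |
		       XCAN_BRPR_TDC_ENABLE;
	}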
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 6d1fcb08bba1..702d68ae435a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -70,6 +70,15 @@ config NET_DSA_QCA8K
source "drivers/net/dsa/realtek/Kconfig"
+config NET_DSA_RZN1_A5PSW
+ tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
+ depends on OF && ARCH_RZN1
+ select NET_DSA_TAG_RZN1_A5PSW
+ select PCS_RZN1_MIIC
+ help
+	  This driver supports the A5PSW switch, which is embedded in the
+	  Renesas RZ/N1 SoC.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index e73838c12256..b32907afa702 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+obj-$(CONFIG_NET_DSA_RZN1_A5PSW) += rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 0e54b2a0c211..308f15d3832e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -320,8 +320,6 @@ static void b53_spi_remove(struct spi_device *spi)
if (dev)
b53_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
}
static void b53_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 87e81c636339..be0edfa093d0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -878,6 +878,11 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
if (duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
core_writel(priv, reg, offset);
}
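For context, phylink resolves pause negotiation before calling mac_link_up, so the two booleans above can be applied to the port override register directly; sketched here with assumed register helpers:

	static void example_mac_link_up(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface,
					struct phy_device *phydev,
					int speed, int duplex,
					bool tx_pause, bool rx_pause)
	{
		u32 reg = example_read_override(ds, port); /* assumed helper */

		if (tx_pause)
			reg |= TXFLOW_CNTL;
		if (rx_pause)
			reg |= RXFLOW_CNTL;
		example_write_override(ds, port, reg);     /* assumed helper */
	}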
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 2572c6087bb5..b28baab6d56a 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -300,6 +300,7 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
const char *label, *state;
int ret = -EINVAL;
+ of_node_get(hellcreek->dev->of_node);
leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
if (!leds) {
dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
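The of_node_get() added above is needed because of_find_node_by_name() drops a reference on its 'from' argument. The general pattern, sketched with an assumed device pointer:

	/* of_find_node_by_name() calls of_node_put() on 'from', so take an
	 * extra reference first if that node must remain valid afterwards.
	 */
	of_node_get(dev->of_node);
	leds = of_find_node_by_name(dev->of_node, "leds");
	if (leds) {
		/* ... parse the child nodes ... */
		of_node_put(leds);
	}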
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index c9e2a8989556..06b1efdb5e7d 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,49 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only
-config NET_DSA_MICROCHIP_KSZ_COMMON
- select NET_DSA_TAG_KSZ
- tristate
-
-menuconfig NET_DSA_MICROCHIP_KSZ9477
- tristate "Microchip KSZ9477 series switch support"
+menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
+ select NET_DSA_TAG_KSZ
help
- This driver adds support for Microchip KSZ9477 switch chips.
+	  This driver adds support for the Microchip KSZ9477 series and
+	  KSZ8795/KSZ88X3 switch chips.
config NET_DSA_MICROCHIP_KSZ9477_I2C
- tristate "KSZ9477 series I2C connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && I2C
+ tristate "KSZ series I2C connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C
select REGMAP_I2C
help
Select to enable support for registering switches configured through I2C.
-config NET_DSA_MICROCHIP_KSZ9477_SPI
- tristate "KSZ9477 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
+config NET_DSA_MICROCHIP_KSZ_SPI
+ tristate "KSZ series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
-menuconfig NET_DSA_MICROCHIP_KSZ8795
- tristate "Microchip KSZ8795 series switch support"
- depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
- help
- This driver adds support for Microchip KSZ8795/KSZ88X3 switch chips.
-
-config NET_DSA_MICROCHIP_KSZ8795_SPI
- tristate "KSZ8795 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795 && SPI
- select REGMAP_SPI
- help
- This driver accesses KSZ8795 chip through SPI.
-
- It is required to use the KSZ8795 switch driver as the only access
- is through SPI.
-
config NET_DSA_MICROCHIP_KSZ8863_SMI
tristate "KSZ series SMI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON
select MDIO_BITBANG
help
Select to enable support for registering switches configured through
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 2a03b21a3386..28873559efc2 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
+ksz_switch-objs := ksz_common.o
+ksz_switch-objs += ksz9477.o
+ksz_switch-objs += ksz8795.o
+ksz_switch-objs += lan937x_main.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index cae76f5e7787..42c50cc4d853 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -9,63 +9,53 @@
#define __KSZ8XXX_H
#include <linux/types.h>
+#include <net/dsa.h>
+#include "ksz_common.h"
-enum ksz_regs {
- REG_IND_CTRL_0,
- REG_IND_DATA_8,
- REG_IND_DATA_CHECK,
- REG_IND_DATA_HI,
- REG_IND_DATA_LO,
- REG_IND_MIB_CHECK,
- REG_IND_BYTE,
- P_FORCE_CTRL,
- P_LINK_STATUS,
- P_LOCAL_CTRL,
- P_NEG_RESTART_CTRL,
- P_REMOTE_STATUS,
- P_SPEED_STATUS,
- S_TAIL_TAG_CTRL,
-};
-
-enum ksz_masks {
- PORT_802_1P_REMAPPING,
- SW_TAIL_TAG_ENABLE,
- MIB_COUNTER_OVERFLOW,
- MIB_COUNTER_VALID,
- VLAN_TABLE_FID,
- VLAN_TABLE_MEMBERSHIP,
- VLAN_TABLE_VALID,
- STATIC_MAC_TABLE_VALID,
- STATIC_MAC_TABLE_USE_FID,
- STATIC_MAC_TABLE_FID,
- STATIC_MAC_TABLE_OVERRIDE,
- STATIC_MAC_TABLE_FWD_PORTS,
- DYNAMIC_MAC_TABLE_ENTRIES_H,
- DYNAMIC_MAC_TABLE_MAC_EMPTY,
- DYNAMIC_MAC_TABLE_NOT_READY,
- DYNAMIC_MAC_TABLE_ENTRIES,
- DYNAMIC_MAC_TABLE_FID,
- DYNAMIC_MAC_TABLE_SRC_PORT,
- DYNAMIC_MAC_TABLE_TIMESTAMP,
-};
-
-enum ksz_shifts {
- VLAN_TABLE_MEMBERSHIP_S,
- VLAN_TABLE,
- STATIC_MAC_FWD_PORTS,
- STATIC_MAC_FID,
- DYNAMIC_MAC_ENTRIES_H,
- DYNAMIC_MAC_ENTRIES,
- DYNAMIC_MAC_FID,
- DYNAMIC_MAC_TIMESTAMP,
- DYNAMIC_MAC_SRC_PORT,
-};
-
-struct ksz8 {
- const u8 *regs;
- const u32 *masks;
- const u8 *shifts;
- void *priv;
-};
+int ksz8_setup(struct dsa_switch *ds);
+u32 ksz8_get_port_addr(int port, int offset);
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
+int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz8_port_init_cnt(struct ksz_device *dev, int port);
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz8_get_stp_reg(void);
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void ksz8_config_cpu_port(struct dsa_switch *ds);
+int ksz8_enable_stp_addr(struct ksz_device *dev);
+int ksz8_reset_switch(struct ksz_device *dev);
+int ksz8_switch_detect(struct ksz_device *dev);
+int ksz8_switch_init(struct ksz_device *dev);
+void ksz8_switch_exit(struct ksz_device *dev);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 12a599d5e61a..911aace42284 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -26,106 +26,6 @@
#include "ksz8795_reg.h"
#include "ksz8.h"
-static const u8 ksz8795_regs[] = {
- [REG_IND_CTRL_0] = 0x6E,
- [REG_IND_DATA_8] = 0x70,
- [REG_IND_DATA_CHECK] = 0x72,
- [REG_IND_DATA_HI] = 0x71,
- [REG_IND_DATA_LO] = 0x75,
- [REG_IND_MIB_CHECK] = 0x74,
- [REG_IND_BYTE] = 0xA0,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x07,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x08,
- [P_SPEED_STATUS] = 0x09,
- [S_TAIL_TAG_CTRL] = 0x0C,
-};
-
-static const u32 ksz8795_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(7),
- [SW_TAIL_TAG_ENABLE] = BIT(1),
- [MIB_COUNTER_OVERFLOW] = BIT(6),
- [MIB_COUNTER_VALID] = BIT(5),
- [VLAN_TABLE_FID] = GENMASK(6, 0),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
- [VLAN_TABLE_VALID] = BIT(12),
- [STATIC_MAC_TABLE_VALID] = BIT(21),
- [STATIC_MAC_TABLE_USE_FID] = BIT(23),
- [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
-};
-
-static const u8 ksz8795_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 7,
- [VLAN_TABLE] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 24,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 29,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 27,
- [DYNAMIC_MAC_SRC_PORT] = 24,
-};
-
-static const u8 ksz8863_regs[] = {
- [REG_IND_CTRL_0] = 0x79,
- [REG_IND_DATA_8] = 0x7B,
- [REG_IND_DATA_CHECK] = 0x7B,
- [REG_IND_DATA_HI] = 0x7C,
- [REG_IND_DATA_LO] = 0x80,
- [REG_IND_MIB_CHECK] = 0x80,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x0C,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x0E,
- [P_SPEED_STATUS] = 0x0F,
- [S_TAIL_TAG_CTRL] = 0x03,
-};
-
-static const u32 ksz8863_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(3),
- [SW_TAIL_TAG_ENABLE] = BIT(6),
- [MIB_COUNTER_OVERFLOW] = BIT(7),
- [MIB_COUNTER_VALID] = BIT(6),
- [VLAN_TABLE_FID] = GENMASK(15, 12),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
- [VLAN_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_USE_FID] = BIT(21),
- [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
-};
-
-static u8 ksz8863_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 22,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 24,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 24,
- [DYNAMIC_MAC_SRC_PORT] = 20,
-};
-
static bool ksz_is_ksz88x3(struct ksz_device *dev)
{
return dev->chip_id == 0x8830;
@@ -145,11 +45,12 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
int ret = 0;
+ regs = dev->info->regs;
+
mutex_lock(&dev->alu_mutex);
ctrl_addr = IND_ACC_TABLE(table) | addr;
@@ -162,7 +63,7 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
return ret;
}
-static int ksz8_reset_switch(struct ksz_device *dev)
+int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
/* reset switch */
@@ -213,18 +114,17 @@ static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
true);
}
-static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
ctrl_addr = addr + dev->info->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -252,16 +152,15 @@ static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
addr -= dev->info->reg_mib_cnt;
ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
@@ -305,13 +204,14 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
u32 *last = (u32 *)dropped;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u32 cur;
+ regs = dev->info->regs;
+
addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
@@ -334,8 +234,8 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
}
}
-static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
if (ksz_is_ksz88x3(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -343,7 +243,7 @@ static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
}
-static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
if (ksz_is_ksz88x3(dev))
return;
@@ -358,7 +258,7 @@ static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
-static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
+void ksz8_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
@@ -392,10 +292,11 @@ static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ regs = dev->info->regs;
+
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
@@ -406,10 +307,11 @@ static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ regs = dev->info->regs;
+
ctrl_addr = IND_ACC_TABLE(table) | addr;
mutex_lock(&dev->alu_mutex);
@@ -420,13 +322,12 @@ static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
{
- struct ksz8 *ksz8 = dev->priv;
int timeout = 100;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
do {
ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
@@ -447,22 +348,20 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
return 0;
}
-static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
- u8 *mac_addr, u8 *fid, u8 *src_port,
- u8 *timestamp, u16 *entries)
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u8 data;
int rc;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
@@ -512,17 +411,16 @@ static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
return rc;
}
-static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
data_hi = data >> 32;
@@ -551,17 +449,16 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
return -ENXIO;
}
-static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
data_lo = ((u32)alu->mac[2] << 24) |
((u32)alu->mac[3] << 16) |
@@ -587,12 +484,11 @@ static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
u8 *member, u8 *valid)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*fid = vlan & masks[VLAN_TABLE_FID];
*member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
@@ -603,12 +499,11 @@ static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
u16 *vlan)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*vlan = fid;
*vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
@@ -618,12 +513,11 @@ static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
u64 data;
int i;
- shifts = ksz8->shifts;
+ shifts = dev->info->shifts;
ksz8_r_table(dev, TABLE_VLAN, addr, &data);
addr *= 4;
@@ -663,16 +557,17 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, link;
- const u8 *regs = ksz8->regs;
int processed = true;
+ const u16 *regs;
u8 val1, val2;
u16 data = 0;
u8 p = phy;
+ regs = dev->info->regs;
+
switch (reg) {
case MII_BMCR:
ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
@@ -786,13 +681,14 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
*val = data;
}
-static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, data;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u8 p = phy;
+ regs = dev->info->regs;
+
switch (reg) {
case MII_BMCR:
@@ -898,30 +794,7 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
}
}
-static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- struct ksz_device *dev = ds->priv;
-
- /* ksz88x3 uses the same tag schema as KSZ9893 */
- return ksz_is_ksz88x3(dev) ?
- DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
-}
-
-static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
-{
- /* Silicon Errata Sheet (DS80000830A):
- * Port 1 does not work with LinkMD Cable-Testing.
- * Port 1 does not respond to received PAUSE control frames.
- */
- if (!port)
- return MICREL_KSZ8_P1_ERRATA;
-
- return 0;
-}
-
-static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
u8 data;
@@ -931,16 +804,14 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
}
-static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
-}
-
-static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
struct ksz_port *p;
+ const u16 *regs;
+
+ regs = dev->info->regs;
if ((uint)port < dev->info->port_cnt) {
first = port;
@@ -954,9 +825,9 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
p = &dev->ports[index];
if (!p->on)
continue;
- ksz_pread8(dev, index, P_STP_CTRL, &learn[index]);
+ ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]);
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL,
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
@@ -965,15 +836,113 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
if (!p->on)
continue;
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]);
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
}
}
-static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
- struct netlink_ext_ack *extack)
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
+ int ret = 0;
+ u16 i = 0;
+ u16 entries = 0;
+ u8 timestamp = 0;
+ u8 fid;
+ u8 member;
+ struct alu_struct alu;
+
+ do {
+ alu.is_static = false;
+ ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
+ &timestamp, &entries);
+ if (!ret && (member & BIT(port))) {
+ ret = cb(alu.mac, alu.fid, alu.is_static, data);
+ if (ret)
+ break;
+ }
+ i++;
+ } while (i < entries);
+ if (i >= entries)
+ ret = 0;
+
+ return ret;
+}
+
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ struct alu_struct alu;
+ int index;
+ int empty = 0;
+
+ alu.port_forward = 0;
+ for (index = 0; index < dev->info->num_statics; index++) {
+ if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
+ break;
+ /* Remember the first empty entry. */
+ } else if (!empty) {
+ empty = index + 1;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics && !empty)
+ return -ENOSPC;
+
+ /* add entry */
+ if (index == dev->info->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, mdb->addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (mdb->vid) {
+ alu.is_use_fid = true;
+
+ /* Need a way to map VID to FID. */
+ alu.fid = mdb->vid;
+ }
+ ksz8_w_sta_mac_table(dev, index, &alu);
+
+ return 0;
+}
+
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ struct alu_struct alu;
+ int index;
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
+ break;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics)
+ goto exit;
+
+ /* clear port */
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+ ksz8_w_sta_mac_table(dev, index, &alu);
+
+exit:
+ return 0;
+}
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack)
+{
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
@@ -998,12 +967,11 @@ static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
}
}
-static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
u16 data, new_pvid = 0;
u8 fid, member, valid;
@@ -1071,10 +1039,9 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
u16 data, pvid;
u8 fid, member, valid;
@@ -1104,12 +1071,10 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress, struct netlink_ext_ack *extack)
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (ingress) {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
@@ -1128,10 +1093,9 @@ static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
u8 data;
if (mirror->ingress) {
@@ -1197,14 +1161,13 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
p->phydev.duplex = 1;
}
-static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
u8 member;
- masks = ksz8->masks;
+ masks = dev->info->masks;
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
@@ -1234,17 +1197,17 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz8_cfg_port_member(dev, port, member);
}
-static void ksz8_config_cpu_port(struct dsa_switch *ds)
+void ksz8_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
struct ksz_port *p;
const u32 *masks;
+ const u16 *regs;
u8 remote;
int i;
- masks = ksz8->masks;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
/* Switch marks the maximum frame with extra byte as oversize. */
ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
@@ -1258,7 +1221,7 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
if (i == dev->phy_port_cnt)
@@ -1272,15 +1235,15 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
continue;
if (!ksz_is_ksz88x3(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
- if (remote & PORT_FIBER_MODE)
+ if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
}
if (p->fiber)
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- true);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, true);
else
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- false);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, false);
}
}
@@ -1301,22 +1264,26 @@ static int ksz8_handle_global_errata(struct dsa_switch *ds)
return ret;
}
-static int ksz8_setup(struct dsa_switch *ds)
+int ksz8_enable_stp_addr(struct ksz_device *dev)
{
- struct ksz_device *dev = ds->priv;
struct alu_struct alu;
- int i, ret = 0;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->info->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->info->cpu_ports;
- ret = ksz8_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
+ ksz8_w_sta_mac_table(dev, 0, &alu);
+
+ return 0;
+}
+
+int ksz8_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
@@ -1335,10 +1302,6 @@ static int ksz8_setup(struct dsa_switch *ds)
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
- ksz8_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
-
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
@@ -1346,38 +1309,15 @@ static int ksz8_setup(struct dsa_switch *ds)
if (!ksz_is_ksz88x3(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
-
for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- /* Setup STP address for STP operation. */
- memset(&alu, 0, sizeof(alu));
- ether_addr_copy(alu.mac, eth_stp_addr);
- alu.is_static = true;
- alu.is_override = true;
- alu.port_forward = dev->info->cpu_ports;
-
- ksz8_w_sta_mac_table(dev, 0, &alu);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
return ksz8_handle_global_errata(ds);
}
-static void ksz8_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
{
- struct ksz_device *dev = ds->priv;
-
- ksz_phylink_get_caps(ds, port, config);
-
config->mac_capabilities = MAC_10 | MAC_100;
/* Silicon Errata Sheet (DS80000830A):
@@ -1393,102 +1333,17 @@ static void ksz8_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities |= MAC_ASYM_PAUSE;
}
-static const struct dsa_switch_ops ksz8_switch_ops = {
- .get_tag_protocol = ksz8_get_tag_protocol,
- .get_phy_flags = ksz8_sw_get_phy_flags,
- .setup = ksz8_setup,
- .phy_read = ksz_phy_read16,
- .phy_write = ksz_phy_write16,
- .phylink_get_caps = ksz8_get_caps,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz8_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz8_port_vlan_filtering,
- .port_vlan_add = ksz8_port_vlan_add,
- .port_vlan_del = ksz8_port_vlan_del,
- .port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_add = ksz_port_mdb_add,
- .port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz8_port_mirror_add,
- .port_mirror_del = ksz8_port_mirror_del,
-};
-
-static u32 ksz8_get_port_addr(int port, int offset)
+u32 ksz8_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz8_switch_detect(struct ksz_device *dev)
+int ksz8_switch_init(struct ksz_device *dev)
{
- u8 id1, id2;
- u16 id16;
- int ret;
-
- /* read chip id */
- ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
- if (ret)
- return ret;
-
- id1 = id16 >> 8;
- id2 = id16 & SW_CHIP_ID_M;
-
- switch (id1) {
- case KSZ87_FAMILY_ID:
- if ((id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
- return -ENODEV;
-
- if (id2 == CHIP_ID_95) {
- u8 val;
-
- id2 = 0x95;
- ksz_read8(dev, REG_PORT_STATUS_0, &val);
- if (val & PORT_FIBER_MODE)
- id2 = 0x65;
- } else if (id2 == CHIP_ID_94) {
- id2 = 0x94;
- }
- break;
- case KSZ88_FAMILY_ID:
- if (id2 != CHIP_ID_63)
- return -ENODEV;
- break;
- default:
- dev_err(dev->dev, "invalid family id: %d\n", id1);
- return -ENODEV;
- }
- id16 &= ~0xff;
- id16 |= id2;
- dev->chip_id = id16;
-
- return 0;
-}
-
-static int ksz8_switch_init(struct ksz_device *dev)
-{
- struct ksz8 *ksz8 = dev->priv;
-
- dev->ds->ops = &ksz8_switch_ops;
-
dev->cpu_port = fls(dev->info->cpu_ports) - 1;
dev->phy_port_cnt = dev->info->port_cnt - 1;
dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
- if (ksz_is_ksz88x3(dev)) {
- ksz8->regs = ksz8863_regs;
- ksz8->masks = ksz8863_masks;
- ksz8->shifts = ksz8863_shifts;
- } else {
- ksz8->regs = ksz8795_regs;
- ksz8->masks = ksz8795_masks;
- ksz8->shifts = ksz8795_shifts;
- }
-
/* We rely on software untagging on the CPU port, so that we
* can support both tagged and untagged VLANs
*/
@@ -1502,37 +1357,11 @@ static int ksz8_switch_init(struct ksz_device *dev)
return 0;
}
-static void ksz8_switch_exit(struct ksz_device *dev)
+void ksz8_switch_exit(struct ksz_device *dev)
{
ksz8_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz8_dev_ops = {
- .get_port_addr = ksz8_get_port_addr,
- .cfg_port_member = ksz8_cfg_port_member,
- .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
- .port_setup = ksz8_port_setup,
- .r_phy = ksz8_r_phy,
- .w_phy = ksz8_w_phy,
- .r_dyn_mac_table = ksz8_r_dyn_mac_table,
- .r_sta_mac_table = ksz8_r_sta_mac_table,
- .w_sta_mac_table = ksz8_w_sta_mac_table,
- .r_mib_cnt = ksz8_r_mib_cnt,
- .r_mib_pkt = ksz8_r_mib_pkt,
- .freeze_mib = ksz8_freeze_mib,
- .port_init_cnt = ksz8_port_init_cnt,
- .shutdown = ksz8_reset_switch,
- .detect = ksz8_switch_detect,
- .init = ksz8_switch_init,
- .exit = ksz8_switch_exit,
-};
-
-int ksz8_switch_register(struct ksz_device *dev)
-{
- return ksz_switch_register(dev, &ksz8_dev_ops);
-}
-EXPORT_SYMBOL(ksz8_switch_register);
-
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
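The ksz8795.c changes above drop the driver-private struct ksz8 and instead index const per-chip tables from the shared chip-info structure. A sketch of that named-index table pattern (offsets illustrative, shape as in the hunks):

	enum example_regs { P_STP_CTRL, S_TAIL_TAG_CTRL };

	struct example_chip_info {
		const u16 *regs;
	};

	static const u16 example_ksz8795_regs[] = {
		[P_STP_CTRL]	  = 0x02,	/* illustrative offsets */
		[S_TAIL_TAG_CTRL] = 0x0C,
	};

	static const struct example_chip_info example_info = {
		.regs = example_ksz8795_regs,
	};

	/* shared code then reads example_info.regs[P_STP_CTRL] instead of
	 * dereferencing a per-chip pointer stashed in a private struct
	 */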
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 4109433b6b6c..a848eb4c54cb 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -14,22 +14,8 @@
#define KS_PRIO_M 0x3
#define KS_PRIO_S 2
-#define REG_CHIP_ID0 0x00
-
-#define KSZ87_FAMILY_ID 0x87
-#define KSZ88_FAMILY_ID 0x88
-
-#define REG_CHIP_ID1 0x01
-
-#define SW_CHIP_ID_M 0xF0
-#define SW_CHIP_ID_S 4
#define SW_REVISION_M 0x0E
#define SW_REVISION_S 1
-#define SW_START 0x01
-
-#define CHIP_ID_94 0x60
-#define CHIP_ID_95 0x90
-#define CHIP_ID_63 0x30
#define KSZ8863_REG_SW_RESET 0x43
@@ -57,7 +43,6 @@
#define REG_SW_CTRL_2 0x04
#define UNICAST_VLAN_BOUNDARY BIT(7)
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
@@ -77,13 +62,9 @@
#define SW_FLOW_CTRL BIT(5)
#define SW_10_MBIT BIT(4)
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_CTRL_5 0x07
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_CTRL_6 0x08
#define SW_MIB_COUNTER_FLUSH BIT(7)
@@ -217,8 +198,6 @@
#define REG_PORT_4_STATUS_0 0x48
/* For KSZ8765. */
-#define PORT_FIBER_MODE BIT(7)
-
#define PORT_REMOTE_ASYM_PAUSE BIT(5)
#define PORT_REMOTE_SYM_PAUSE BIT(4)
#define PORT_REMOTE_100BTX_FD BIT(3)
@@ -322,7 +301,6 @@
#define REG_PORT_CTRL_5 0x05
-#define REG_PORT_STATUS_0 0x08
#define REG_PORT_STATUS_1 0x09
#define REG_PORT_LINK_MD_CTRL 0x0A
#define REG_PORT_LINK_MD_RESULT 0x0B
@@ -788,7 +766,6 @@
#define P_TAG_CTRL REG_PORT_CTRL_0
#define P_MIRROR_CTRL REG_PORT_CTRL_1
#define P_802_1P_CTRL REG_PORT_CTRL_2
-#define P_STP_CTRL REG_PORT_CTRL_2
#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
@@ -813,12 +790,6 @@
#define REG_IND_EEE_GLOB2_LO 0x34
#define REG_IND_EEE_GLOB2_HI 0x35
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
-
/**
* MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
* MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index b6f99e641dca..5247fdfb964d 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -26,11 +26,9 @@ static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
struct mdio_device *mdev;
u8 reg = *(u8 *)reg_buf;
u8 *val = val_buf;
- struct ksz8 *ksz8;
int i, ret = 0;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
for (i = 0; i < val_len; i++) {
@@ -55,13 +53,11 @@ static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
{
struct ksz_device *dev = ctx;
struct mdio_device *mdev;
- struct ksz8 *ksz8;
int i, ret = 0;
u32 reg;
u8 *val;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
val = (u8 *)(data + 4);
reg = *(u32 *)data;
@@ -142,17 +138,10 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
{
struct regmap_config rc;
struct ksz_device *dev;
- struct ksz8 *ksz8;
int ret;
int i;
- ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = mdiodev;
-
- dev = ksz_switch_alloc(&mdiodev->dev, ksz8);
+ dev = ksz_switch_alloc(&mdiodev->dev, mdiodev);
if (!dev)
return -ENOMEM;
@@ -174,7 +163,7 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
if (mdiodev->dev.platform_data)
dev->pdata = mdiodev->dev.platform_data;
- ret = ksz8_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index ab40b700cf1a..6453642fa14c 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -17,6 +17,7 @@
#include "ksz9477_reg.h"
#include "ksz_common.h"
+#include "ksz9477.h"
/* Used with variable features to indicate capabilities. */
#define GBIT_SUPPORT BIT(0)
@@ -47,9 +48,8 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
-static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
- struct ksz_device *dev = ds->priv;
u16 frame_size, max_frame = 0;
int i;
@@ -65,7 +65,7 @@ static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
REG_SW_MTU_MASK, max_frame);
}
-static int ksz9477_max_mtu(struct dsa_switch *ds, int port)
+int ksz9477_max_mtu(struct ksz_device *dev, int port)
{
return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
@@ -175,7 +175,7 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
-static int ksz9477_reset_switch(struct ksz_device *dev)
+int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
u32 data32;
@@ -198,12 +198,6 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], REG_SW_MAC_CTRL_2,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
-
data8 = SW_ENABLE_REFCLKO;
if (dev->synclko_disable)
data8 = 0;
@@ -214,8 +208,7 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
return 0;
}
-static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt)
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
struct ksz_port *p = &dev->ports[port];
unsigned int val;
@@ -242,14 +235,14 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
*cnt += data;
}
-static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
addr = dev->info->mib_names[addr].index;
ksz9477_r_mib_cnt(dev, port, addr, cnt);
}
-static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
struct ksz_port *p = &dev->ports[port];
@@ -263,7 +256,7 @@ static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -276,21 +269,8 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
mutex_unlock(&mib->cnt_mutex);
}
-static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
+void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
- struct ksz_device *dev = ds->priv;
-
- if (dev->features & IS_9893)
- proto = DSA_TAG_PROTO_KSZ9893;
- return proto;
-}
-
-static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
-{
- struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
/* No real PHY after this. Simulate the PHY.
@@ -335,40 +315,30 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
}
- return val;
+ *data = val;
}
-static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
- u16 val)
+void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- struct ksz_device *dev = ds->priv;
-
/* No real PHY after this. */
if (addr >= dev->phy_port_cnt)
- return 0;
+ return;
/* No gigabit support. Do not write to this register. */
if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
- return 0;
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ return;
- return 0;
+ ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}
-static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
- u8 member)
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}
-static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state)
-{
- ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
-}
-
-static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
+ const u16 *regs = dev->info->regs;
u8 data;
regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
@@ -377,24 +347,21 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
if (port < dev->info->port_cnt) {
/* flush individual port */
- ksz_pread8(dev, port, P_STP_CTRL, &data);
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
if (!(data & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, port, P_STP_CTRL,
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL],
data | PORT_LEARN_DISABLE);
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
} else {
/* flush all */
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
}
}
-static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -408,11 +375,10 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
int err;
@@ -445,10 +411,9 @@ static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
u16 pvid;
@@ -479,11 +444,9 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -537,11 +500,9 @@ exit:
return ret;
}
-static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -628,10 +589,9 @@ static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
alu->mac[5] = alu_table[3] & 0xFF;
}
-static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
int ret = 0;
u32 ksz_data;
u32 alu_table[4];
@@ -680,17 +640,20 @@ exit:
return ret;
}
-static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
u32 mac_hi, mac_lo;
int err = 0;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
@@ -699,8 +662,8 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -744,7 +707,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -756,17 +719,20 @@ exit:
return err;
}
-static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
int ret = 0;
u32 mac_hi, mac_lo;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
@@ -775,8 +741,8 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -818,7 +784,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -832,11 +798,10 @@ exit:
return ret;
}
-static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress, struct netlink_ext_ack *extack)
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
u8 data;
int p;
@@ -872,10 +837,9 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
bool in_use = false;
u8 data;
int p;
@@ -1097,16 +1061,17 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}
-static void ksz9477_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
{
- ksz_phylink_get_caps(ds, port, config);
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
- config->mac_capabilities = MAC_10 | MAC_100 | MAC_1000FD |
- MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+ if (dev->features & GBIT_SUPPORT)
+ config->mac_capabilities |= MAC_1000FD;
}
-static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
@@ -1203,7 +1168,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
-static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
@@ -1260,7 +1225,7 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
continue;
p = &dev->ports[i];
- ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
p->on = 1;
if (i < dev->phy_port_cnt)
p->phy = 1;
@@ -1273,22 +1238,44 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
}
}
-static int ksz9477_setup(struct dsa_switch *ds)
+int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
- struct ksz_device *dev = ds->priv;
- int ret = 0;
+ const u32 *masks;
+ u32 data;
+ int ret;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->info->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ masks = dev->info->masks;
- ret = ksz9477_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
+ /* Enable Reserved multicast table */
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
+
+ /* Set the Override bit for forwarding BPDU packets to the CPU */
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
+ ALU_V_OVERRIDE | BIT(dev->cpu_port));
+ if (ret < 0)
+ return ret;
+
+ data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait for the ALU write to finish */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
return ret;
}
+ return 0;
+}
+
+int ksz9477_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
/* Required for port partitioning. */
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
true);
@@ -1305,69 +1292,27 @@ static int ksz9477_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ksz9477_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
-
/* queue based egress rate limit */
ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* start switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
-static const struct dsa_switch_ops ksz9477_switch_ops = {
- .get_tag_protocol = ksz9477_get_tag_protocol,
- .setup = ksz9477_setup,
- .phy_read = ksz9477_phy_read16,
- .phy_write = ksz9477_phy_write16,
- .phylink_mac_link_down = ksz_mac_link_down,
- .phylink_get_caps = ksz9477_get_caps,
- .port_enable = ksz_enable_port,
- .get_strings = ksz_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz9477_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_add = ksz9477_port_vlan_add,
- .port_vlan_del = ksz9477_port_vlan_del,
- .port_fdb_dump = ksz9477_port_fdb_dump,
- .port_fdb_add = ksz9477_port_fdb_add,
- .port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_add = ksz9477_port_mdb_add,
- .port_mdb_del = ksz9477_port_mdb_del,
- .port_mirror_add = ksz9477_port_mirror_add,
- .port_mirror_del = ksz9477_port_mirror_del,
- .get_stats64 = ksz_get_stats64,
- .port_change_mtu = ksz9477_change_mtu,
- .port_max_mtu = ksz9477_max_mtu,
-};
-
-static u32 ksz9477_get_port_addr(int port, int offset)
+u32 ksz9477_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz9477_switch_detect(struct ksz_device *dev)
+int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
- u8 id_hi;
- u8 id_lo;
- u32 id32;
int ret;
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
/* turn off SPI DO Edge select */
ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
if (ret)
@@ -1378,10 +1323,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
if (ret)
return ret;
- /* read chip id */
- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
- if (ret)
- return ret;
ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
if (ret)
return ret;
@@ -1392,12 +1333,7 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
/* Default capability is gigabit capable. */
dev->features = GBIT_SUPPORT;
- dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
- id_hi = (u8)(id32 >> 16);
- id_lo = (u8)(id32 >> 8);
- if ((id_lo & 0xf) == 3) {
- /* Chip is from KSZ9893 design. */
- dev_info(dev->dev, "Found KSZ9893\n");
+ if (dev->chip_id == KSZ9893_CHIP_ID) {
dev->features |= IS_9893;
/* Chip does not support gigabit. */
@@ -1405,7 +1341,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
dev->features &= ~GBIT_SUPPORT;
dev->phy_port_cnt = 2;
} else {
- dev_info(dev->dev, "Found KSZ9477 or compatible\n");
/* Chip uses new XMII register definitions. */
dev->features |= NEW_XMII;
@@ -1414,72 +1349,14 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
dev->features &= ~GBIT_SUPPORT;
}
- /* Change chip id to known ones so it can be matched against them. */
- id32 = (id_hi << 16) | (id_lo << 8);
-
- dev->chip_id = id32;
-
return 0;
}
-static int ksz9477_switch_init(struct ksz_device *dev)
-{
- dev->ds->ops = &ksz9477_switch_ops;
-
- dev->port_mask = (1 << dev->info->port_cnt) - 1;
-
- return 0;
-}
-
-static void ksz9477_switch_exit(struct ksz_device *dev)
+void ksz9477_switch_exit(struct ksz_device *dev)
{
ksz9477_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz9477_dev_ops = {
- .get_port_addr = ksz9477_get_port_addr,
- .cfg_port_member = ksz9477_cfg_port_member,
- .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
- .port_setup = ksz9477_port_setup,
- .r_mib_cnt = ksz9477_r_mib_cnt,
- .r_mib_pkt = ksz9477_r_mib_pkt,
- .r_mib_stat64 = ksz_r_mib_stats64,
- .freeze_mib = ksz9477_freeze_mib,
- .port_init_cnt = ksz9477_port_init_cnt,
- .shutdown = ksz9477_reset_switch,
- .detect = ksz9477_switch_detect,
- .init = ksz9477_switch_init,
- .exit = ksz9477_switch_exit,
-};
-
-int ksz9477_switch_register(struct ksz_device *dev)
-{
- int ret, i;
- struct phy_device *phydev;
-
- ret = ksz_switch_register(dev, &ksz9477_dev_ops);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->phy_port_cnt; ++i) {
- if (!dsa_is_user_port(dev->ds, i))
- continue;
-
- phydev = dsa_to_port(dev->ds, i)->slave->phydev;
-
- /* The MAC actually cannot run in 1000 half-duplex mode. */
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* PHY does not support gigabit. */
- if (!(dev->features & GBIT_SUPPORT))
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
- }
- return ret;
-}
-EXPORT_SYMBOL(ksz9477_switch_register);
-
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
new file mode 100644
index 000000000000..cd278b307b3c
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ9477 series Header file
+ *
+ * Copyright (C) 2017-2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ9477_H
+#define __KSZ9477_H
+
+#include <net/dsa.h>
+#include "ksz_common.h"
+
+int ksz9477_setup(struct dsa_switch *ds);
+u32 ksz9477_get_port_addr(int port, int offset);
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port);
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_get_stp_reg(void);
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz9477_max_mtu(struct ksz_device *dev, int port);
+void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_enable_stp_addr(struct ksz_device *dev);
+int ksz9477_reset_switch(struct ksz_device *dev);
+int ksz9477_dsa_init(struct ksz_device *dev);
+int ksz9477_switch_init(struct ksz_device *dev);
+void ksz9477_switch_exit(struct ksz_device *dev);
+
+#endif
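The new header exports the formerly static ksz9477_* helpers so that ksz_common.c can bind them into a shared ops table. A toy sketch of that wiring follows, with hypothetical toy_* names standing in for the kernel types; it only illustrates the split between chip file, chip header, and common core.

```c
/* Toy model of the refactor, not the kernel API: the chip-specific file
 * implements the function, the header exposes it, and the common core
 * binds it into an ops table selected per chip.
 */
#include <stdio.h>

struct toy_dev;

struct toy_ops {
	int (*reset)(struct toy_dev *dev);
};

struct toy_dev {
	const struct toy_ops *ops;
	const char *name;
};

/* would live in ksz9477.c, declared in ksz9477.h */
static int toy9477_reset(struct toy_dev *dev)
{
	printf("%s: reset\n", dev->name);
	return 0;
}

/* would live in ksz_common.c, like ksz9477_dev_ops */
static const struct toy_ops toy9477_ops = { .reset = toy9477_reset };

int main(void)
{
	struct toy_dev dev = { .ops = &toy9477_ops, .name = "ksz9477" };

	return dev.ops->reset(&dev);
}
```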
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index faa3163c86b0..99966514d444 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -41,7 +41,7 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
if (i2c->dev.platform_data)
dev->pdata = i2c->dev.platform_data;
- ret = ksz9477_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -71,8 +71,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
if (!dev)
return;
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 7a2c8d4767af..d0cce4ca3cf9 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -25,7 +25,6 @@
#define REG_CHIP_ID2__1 0x0002
-#define CHIP_ID_63 0x63
#define CHIP_ID_66 0x66
#define CHIP_ID_67 0x67
#define CHIP_ID_77 0x77
@@ -166,7 +165,6 @@
#define SW_DOUBLE_TAG BIT(7)
#define SW_RESET BIT(1)
-#define SW_START BIT(0)
#define REG_SW_MAC_ADDR_0 0x0302
#define REG_SW_MAC_ADDR_1 0x0303
@@ -266,7 +264,6 @@
#define REG_SW_MAC_CTRL_1 0x0331
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
@@ -277,13 +274,9 @@
#define REG_SW_MAC_CTRL_2 0x0332
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_MAC_CTRL_3 0x0333
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_MAC_CTRL_4 0x0334
#define SW_PASS_PAUSE BIT(3)
@@ -426,12 +419,9 @@
#define REG_SW_ALU_STAT_CTRL__4 0x041C
-#define ALU_STAT_INDEX_M (BIT(4) - 1)
-#define ALU_STAT_INDEX_S 16
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
#define ALU_RESV_MCAST_ADDR BIT(1)
-#define ALU_STAT_READ BIT(0)
#define REG_SW_ALU_VAL_A 0x0420
@@ -1269,8 +1259,6 @@
/* 5 - MIB Counters */
#define REG_PORT_MIB_CTRL_STAT__4 0x0500
-#define MIB_COUNTER_OVERFLOW BIT(31)
-#define MIB_COUNTER_VALID BIT(30)
#define MIB_COUNTER_READ BIT(25)
#define MIB_COUNTER_FLUSH_FREEZE BIT(24)
#define MIB_COUNTER_INDEX_M (BIT(8) - 1)
@@ -1629,11 +1617,7 @@
#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0
#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL
-#define P_STP_CTRL REG_PORT_LUE_MSTP_STATE
#define P_PHY_CTRL REG_PORT_PHY_CTRL
-#define P_NEG_RESTART_CTRL REG_PORT_PHY_CTRL
-#define P_LINK_STATUS REG_PORT_PHY_STATUS
-#define P_SPEED_STATUS REG_PORT_PHY_PHY_CTRL
#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT
#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1
@@ -1653,12 +1637,6 @@
#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
-
#define KSZ9477_MAX_FRAME_SIZE 9000
#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
deleted file mode 100644
index 1bc8b0cbe458..000000000000
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip KSZ9477 series register access through SPI
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_common.h"
-
-#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_ALIGN 3
-#define SPI_TURNAROUND_SHIFT 5
-
-KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
- SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
-
-static int ksz9477_spi_probe(struct spi_device *spi)
-{
- struct regmap_config rc;
- struct ksz_device *dev;
- int i, ret;
-
- dev = ksz_switch_alloc(&spi->dev, spi);
- if (!dev)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- rc = ksz9477_regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz9477_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static void ksz9477_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static void ksz9477_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz9477_dt_ids[] = {
- {
- .compatible = "microchip,ksz9477",
- .data = &ksz_switch_chips[KSZ9477]
- },
- {
- .compatible = "microchip,ksz9897",
- .data = &ksz_switch_chips[KSZ9897]
- },
- {
- .compatible = "microchip,ksz9893",
- .data = &ksz_switch_chips[KSZ9893]
- },
- {
- .compatible = "microchip,ksz9563",
- .data = &ksz_switch_chips[KSZ9893]
- },
- {
- .compatible = "microchip,ksz8563",
- .data = &ksz_switch_chips[KSZ9893]
- },
- {
- .compatible = "microchip,ksz9567",
- .data = &ksz_switch_chips[KSZ9567]
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
-
-static const struct spi_device_id ksz9477_spi_ids[] = {
- { "ksz9477" },
- { "ksz9897" },
- { "ksz9893" },
- { "ksz9563" },
- { "ksz8563" },
- { "ksz9567" },
- { },
-};
-MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
-
-static struct spi_driver ksz9477_spi_driver = {
- .driver = {
- .name = "ksz9477-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz9477_dt_ids),
- },
- .id_table = ksz9477_spi_ids,
- .probe = ksz9477_spi_probe,
- .remove = ksz9477_spi_remove,
- .shutdown = ksz9477_spi_shutdown,
-};
-
-module_spi_driver(ksz9477_spi_driver);
-
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 9ca8c8d7740f..28d7cb2ce98f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -16,10 +16,14 @@
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/micrel_phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz8.h"
+#include "ksz9477.h"
+#include "lan937x.h"
#define MIB_COUNTER_NUM 0x20
@@ -138,6 +142,234 @@ static const struct ksz_mib_names ksz9477_mib_names[] = {
{ 0x83, "tx_discards" },
};
+static const struct ksz_dev_ops ksz8_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+};
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .setup = ksz9477_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .r_phy = ksz9477_r_phy,
+ .w_phy = ksz9477_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = ksz9477_get_caps,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = ksz9477_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .config_cpu_port = ksz9477_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+};
+
+static const struct ksz_dev_ops lan937x_dev_ops = {
+ .setup = lan937x_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = lan937x_port_setup,
+ .r_phy = lan937x_r_phy,
+ .w_phy = lan937x_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = lan937x_phylink_get_caps,
+ .phylink_mac_config = lan937x_phylink_mac_config,
+ .phylink_mac_link_up = lan937x_phylink_mac_link_up,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = lan937x_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .config_cpu_port = lan937x_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = lan937x_reset_switch,
+ .init = lan937x_switch_init,
+ .exit = lan937x_switch_exit,
+};
+
+static const u16 ksz8795_regs[] = {
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x07,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x08,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8795_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(6),
+ [MIB_COUNTER_VALID] = BIT(5),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+};
+
+static const u8 ksz8795_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
+static const u16 ksz8863_regs[] = {
+ [REG_IND_CTRL_0] = 0x79,
+ [REG_IND_DATA_8] = 0x7B,
+ [REG_IND_DATA_CHECK] = 0x7B,
+ [REG_IND_DATA_HI] = 0x7C,
+ [REG_IND_DATA_LO] = 0x80,
+ [REG_IND_MIB_CHECK] = 0x80,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0x03,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8863_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(6),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static const u8 ksz8863_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 24,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+static const u16 ksz9477_regs[] = {
+ [P_STP_CTRL] = 0x0B04,
+ [S_START_CTRL] = 0x0300,
+ [S_BROADCAST_CTRL] = 0x0332,
+ [S_MULTICAST_CTRL] = 0x0331,
+};
+
+static const u32 ksz9477_masks[] = {
+ [ALU_STAT_WRITE] = 0,
+ [ALU_STAT_READ] = 1,
+};
+
+static const u8 ksz9477_shifts[] = {
+ [ALU_STAT_INDEX] = 16,
+};
+
+static const u32 lan937x_masks[] = {
+ [ALU_STAT_WRITE] = 1,
+ [ALU_STAT_READ] = 2,
+};
+
+static const u8 lan937x_shifts[] = {
+ [ALU_STAT_INDEX] = 8,
+};
+
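The tables above are what let a single code path drive chips whose register maps differ: the same symbolic index resolves to a different offset per family. A compact sketch of the lookup pattern, with offsets copied from the ksz8795 and ksz9477 tables in this patch and a toy enum mirroring ksz_regs:

```c
/* User-space sketch of the table-driven register lookup: common code
 * never hard-codes an offset, it indexes the per-chip table instead.
 */
#include <stdio.h>
#include <stdint.h>

enum { P_STP_CTRL, S_START_CTRL, N_REGS };

static const uint16_t ksz8795_regs[N_REGS] = {
	[P_STP_CTRL]   = 0x02,
	[S_START_CTRL] = 0x01,
};

static const uint16_t ksz9477_regs[N_REGS] = {
	[P_STP_CTRL]   = 0x0B04,
	[S_START_CTRL] = 0x0300,
};

static void start_switch(const uint16_t *regs)
{
	printf("write SW_START to reg 0x%04x\n", regs[S_START_CTRL]);
}

int main(void)
{
	start_switch(ksz8795_regs);
	start_switch(ksz9477_regs);
	return 0;
}
```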
const struct ksz_chip_data ksz_switch_chips[] = {
[KSZ8795] = {
.chip_id = KSZ8795_CHIP_ID,
@@ -147,10 +379,14 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -179,10 +415,14 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -197,10 +437,14 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -215,9 +459,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x4, /* can be configured as cpu port */
.port_cnt = 3,
+ .ops = &ksz8_dev_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8863_regs,
+ .masks = ksz8863_masks,
+ .shifts = ksz8863_shifts,
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.internal_phy = {true, true, false},
@@ -231,10 +479,14 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
.supports_mii = {false, false, false, false,
false, true, false},
.supports_rmii = {false, false, false, false,
@@ -253,10 +505,14 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
.supports_mii = {false, false, false, false,
false, true, true},
.supports_rmii = {false, false, false, false,
@@ -275,9 +531,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
+ .ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
@@ -292,10 +552,14 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
.supports_mii = {false, false, false, false,
false, true, true},
.supports_rmii = {false, false, false, false,
@@ -314,9 +578,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -331,9 +599,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
.supports_mii = {false, false, false, false, true, true},
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
@@ -348,9 +620,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
@@ -369,9 +645,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
@@ -390,9 +670,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
@@ -436,8 +720,8 @@ static int ksz_check_device_id(struct ksz_device *dev)
return 0;
}
-void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
+static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct ksz_device *dev = ds->priv;
@@ -456,23 +740,29 @@ void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->info->internal_phy[port])
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+
+ if (dev->dev_ops->get_caps)
+ dev->dev_ops->get_caps(dev, port, config);
}
-EXPORT_SYMBOL_GPL(ksz_phylink_get_caps);
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
{
+ struct ethtool_pause_stats *pstats;
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
+ pstats = &mib->pause_stats;
raw = (struct ksz_stats_raw *)mib->counters;
spin_lock(&mib->stats64_lock);
- stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
- stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
/* HW counters are counting bytes + FCS which is not acceptable
* for rtnl_link_stats64 interface
@@ -498,12 +788,14 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
stats->multicast = raw->rx_mcast;
stats->collisions = raw->tx_total_col;
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
spin_unlock(&mib->stats64_lock);
}
-EXPORT_SYMBOL_GPL(ksz_r_mib_stats64);
-void ksz_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *s)
+static void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
{
struct ksz_device *dev = ds->priv;
struct ksz_port_mib *mib;
@@ -514,10 +806,22 @@ void ksz_get_stats64(struct dsa_switch *ds, int port,
memcpy(s, &mib->stats64, sizeof(*s));
spin_unlock(&mib->stats64_lock);
}
-EXPORT_SYMBOL_GPL(ksz_get_stats64);
-void ksz_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
+static void ksz_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
{
struct ksz_device *dev = ds->priv;
int i;
@@ -530,9 +834,8 @@ void ksz_get_strings(struct dsa_switch *ds, int port,
dev->info->mib_names[i].string, ETH_GSTRING_LEN);
}
}
-EXPORT_SYMBOL_GPL(ksz_get_strings);
-void ksz_update_port_member(struct ksz_device *dev, int port)
+static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
@@ -589,7 +892,55 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
-EXPORT_SYMBOL_GPL(ksz_update_port_member);
+
+static int ksz_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs;
+ int ret;
+
+ regs = dev->info->regs;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
+ dev->info->num_vlans, GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = dev->dev_ops->reset(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ /* set broadcast storm protection to a 10% rate */
+ regmap_update_bits(dev->regmap[1], regs[S_BROADCAST_CTRL],
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
+
+ dev->dev_ops->config_cpu_port(ds);
+
+ dev->dev_ops->enable_stp_addr(dev);
+
+ regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL],
+ MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
+
+ ksz_init_mib_timer(dev);
+
+ ds->configure_vlan_while_not_filtering = false;
+
+ if (dev->dev_ops->setup) {
+ ret = dev->dev_ops->setup(ds);
+ if (ret)
+ return ret;
+ }
+
+ /* start switch */
+ regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
+ SW_START, SW_START);
+
+ return 0;
+}
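ksz_setup() now programs broadcast storm protection from the shared constants. A worked check of the arithmetic, following the "148,800 frames * 67 ms / 100" comment this patch moves into ksz_common.h:

```c
/* Worked example of the storm-protection value written above. Plain C,
 * no kernel dependencies; the derivation follows the ksz_common.h
 * comments: 148,800 frames/s is 100 Mbit/s of minimum-size frames, and
 * 67 ms of that is ~9969 frames.
 */
#include <stdio.h>

#define BROADCAST_STORM_VALUE     9969 /* ~148800 * 67 / 1000 */
#define BROADCAST_STORM_PROT_RATE 10   /* percent */

int main(void)
{
	/* the value handed to regmap_update_bits() in ksz_setup() */
	int value = (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;

	printf("programmed limit: %d frames per interval\n", value); /* 996 */
	return 0;
}
```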
static void port_r_cnt(struct ksz_device *dev, int port)
{
@@ -667,9 +1018,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
}
}
-EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
@@ -678,9 +1028,8 @@ int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
return val;
}
-EXPORT_SYMBOL_GPL(ksz_phy_read16);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
@@ -688,10 +1037,25 @@ int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_phy_write16);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
+static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID) {
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+ }
+
+ return 0;
+}
+
+static void ksz_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
@@ -702,9 +1066,8 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
}
-EXPORT_SYMBOL_GPL(ksz_mac_link_down);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
+static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
@@ -713,9 +1076,9 @@ int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
return dev->info->mib_cnt;
}
-EXPORT_SYMBOL_GPL(ksz_sset_count);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
struct ksz_device *dev = ds->priv;
@@ -731,12 +1094,11 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
mutex_unlock(&mib->cnt_mutex);
}
-EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload,
- struct netlink_ext_ack *extack)
+static int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
/* port_stp_state_set() will be called afterwards to put the port in
* appropriate state so there is no need to do anything.
@@ -744,135 +1106,83 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
+static void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
/* port_stp_state_set() will be called afterwards to put the port in
* forwarding state so there is no need to do anything.
*/
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
-void ksz_port_fast_age(struct dsa_switch *ds, int port)
+static void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
-EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data)
+static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 member;
- struct alu_struct alu;
-
- do {
- alu.is_static = false;
- ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
- &member, &timestamp,
- &entries);
- if (!ret && (member & BIT(port))) {
- ret = cb(alu.mac, alu.fid, alu.is_static, data);
- if (ret)
- break;
- }
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
- return ret;
+ if (!dev->dev_ops->fdb_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr,
+ u16 vid, struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- int empty = 0;
-
- alu.port_forward = 0;
- for (index = 0; index < dev->info->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- /* Remember the first empty entry. */
- } else if (!empty) {
- empty = index + 1;
- }
- }
- /* no available entry */
- if (index == dev->info->num_statics && !empty)
- return -ENOSPC;
+ if (!dev->dev_ops->fdb_del)
+ return -EOPNOTSUPP;
- /* add entry */
- if (index == dev->info->num_statics) {
- index = empty - 1;
- memset(&alu, 0, sizeof(alu));
- memcpy(alu.mac, mdb->addr, ETH_ALEN);
- alu.is_static = true;
- }
- alu.port_forward |= BIT(port);
- if (mdb->vid) {
- alu.is_use_fid = true;
+ return dev->dev_ops->fdb_del(dev, port, addr, vid, db);
+}
- /* Need a way to map VID to FID. */
- alu.fid = mdb->vid;
- }
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ksz_device *dev = ds->priv;
- return 0;
+ if (!dev->dev_ops->fdb_dump)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_dump(dev, port, cb, data);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+static int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
-
- for (index = 0; index < dev->info->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- }
- }
- /* no available entry */
- if (index == dev->info->num_statics)
- goto exit;
+ if (!dev->dev_ops->mdb_add)
+ return -EOPNOTSUPP;
- /* clear port */
- alu.port_forward &= ~BIT(port);
- if (!alu.port_forward)
- alu.is_static = false;
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+ return dev->dev_ops->mdb_add(dev, port, mdb, db);
+}
-exit:
- return 0;
+static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
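All of the thin wrappers above follow one guard pattern: each optional per-chip callback is NULL-checked and falls back to -EOPNOTSUPP, so a single dsa_switch_ops can be registered even for chips that lack a feature. A self-contained sketch of the pattern with toy types (EOPNOTSUPP stubbed for user space):

```c
/* Toy illustration of the NULL-check-then-dispatch guard used by the
 * ksz_port_* wrappers; not the kernel types.
 */
#include <stdio.h>

#define EOPNOTSUPP 95

struct toy_ops {
	int (*change_mtu)(int port, int mtu); /* optional per-chip hook */
};

static int toy_change_mtu(const struct toy_ops *ops, int port, int mtu)
{
	if (!ops->change_mtu)
		return -EOPNOTSUPP;

	return ops->change_mtu(port, mtu);
}

int main(void)
{
	struct toy_ops no_mtu = { 0 }; /* chip without MTU support */

	printf("%d\n", toy_change_mtu(&no_mtu, 1, 1500)); /* prints -95 */
	return 0;
}
```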
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+static int ksz_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
@@ -888,16 +1198,17 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_enable_port);
-void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state, int reg)
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
+ const u16 *regs;
u8 data;
- ksz_pread8(dev, port, reg, &data);
+ regs = dev->info->regs;
+
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
switch (state) {
@@ -921,14 +1232,239 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
return;
}
- ksz_pwrite8(dev, port, reg, data);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
p = &dev->ports[port];
p->stp_state = state;
ksz_update_port_member(dev, port);
}
-EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);
+
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct ksz_device *dev = ds->priv;
+ enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
+
+ if (dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ8795;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ9893_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9893;
+
+ if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9897_CHIP_ID ||
+ dev->chip_id == KSZ9567_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9477;
+
+ if (is_lan937x(dev))
+ proto = DSA_TAG_PROTO_LAN937X_VALUE;
+
+ return proto;
+}
+
+static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_filtering)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
+}
+
+static int ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_add(dev, port, vlan, extack);
+}
+
+static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_del(dev, port, vlan);
+}
+
+static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mirror_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
+}
+
+static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->mirror_del)
+ dev->dev_ops->mirror_del(dev, port, mirror);
+}
+
+static int ksz_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->change_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->change_mtu(dev, port, mtu);
+}
+
+static int ksz_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->max_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->max_mtu(dev, port);
+}
+
+static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->phylink_mac_config)
+ dev->dev_ops->phylink_mac_config(dev, port, mode, state);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->phylink_mac_link_up)
+ dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
+ phydev, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static int ksz_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2;
+ u16 id16;
+ u32 id32;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
+ id2 = FIELD_GET(SW_CHIP_ID_M, id16);
+
+ switch (id1) {
+ case KSZ87_FAMILY_ID:
+ if (id2 == KSZ87_CHIP_ID_95) {
+ u8 val;
+
+ dev->chip_id = KSZ8795_CHIP_ID;
+
+ ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
+ if (val & KSZ8_PORT_FIBER_MODE)
+ dev->chip_id = KSZ8765_CHIP_ID;
+ } else if (id2 == KSZ87_CHIP_ID_94) {
+ dev->chip_id = KSZ8794_CHIP_ID;
+ } else {
+ return -ENODEV;
+ }
+ break;
+ case KSZ88_FAMILY_ID:
+ if (id2 == KSZ88_CHIP_ID_63)
+ dev->chip_id = KSZ8830_CHIP_ID;
+ else
+ return -ENODEV;
+ break;
+ default:
+ ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
+ if (ret)
+ return ret;
+
+ dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
+ id32 &= ~0xFF;
+
+ switch (id32) {
+ case KSZ9477_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ dev->chip_id = id32;
+ break;
+ default:
+ dev_err(dev->dev,
+ "unsupported switch detected %x)\n", id32);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
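ksz_switch_detect() replaces the per-driver detect callbacks. The 16-bit ID word decodes as the family in bits 15:8 and the chip in bits 7:4, per the SW_FAMILY_ID_M/SW_CHIP_ID_M masks added to ksz_common.h. A plain-C check of that decode for an example KSZ8795 ID word, with FIELD_GET() replaced by shifts to stay user-space:

```c
/* Sketch of the ID decode above; 0x8795 is used as an example KSZ8795
 * ID word (family 0x87, chip 0x9 == KSZ87_CHIP_ID_95).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t id16 = 0x8795;
	uint8_t id1 = (id16 >> 8) & 0xff; /* SW_FAMILY_ID_M, GENMASK(15, 8) */
	uint8_t id2 = (id16 >> 4) & 0xf;  /* SW_CHIP_ID_M, GENMASK(7, 4) */

	printf("family 0x%02x chip 0x%x\n", id1, id2); /* 0x87 / 0x9 */
	return 0;
}
```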
+static const struct dsa_switch_ops ksz_switch_ops = {
+ .get_tag_protocol = ksz_get_tag_protocol,
+ .get_phy_flags = ksz_get_phy_flags,
+ .setup = ksz_setup,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .phylink_get_caps = ksz_phylink_get_caps,
+ .phylink_mac_config = ksz_phylink_mac_config,
+ .phylink_mac_link_up = ksz_phylink_mac_link_up,
+ .phylink_mac_link_down = ksz_mac_link_down,
+ .port_enable = ksz_enable_port,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz_port_vlan_filtering,
+ .port_vlan_add = ksz_port_vlan_add,
+ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_fdb_add = ksz_port_fdb_add,
+ .port_fdb_del = ksz_port_fdb_del,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz_port_mirror_add,
+ .port_mirror_del = ksz_port_mirror_del,
+ .get_stats64 = ksz_get_stats64,
+ .get_pause_stats = ksz_get_pause_stats,
+ .port_change_mtu = ksz_change_mtu,
+ .port_max_mtu = ksz_max_mtu,
+};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
@@ -941,6 +1477,7 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
+ ds->ops = &ksz_switch_ops;
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
@@ -956,8 +1493,7 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
}
EXPORT_SYMBOL(ksz_switch_alloc);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops)
+int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
struct device_node *port, *ports;
@@ -986,10 +1522,9 @@ int ksz_switch_register(struct ksz_device *dev,
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
- dev->dev_ops = ops;
-
- if (dev->dev_ops->detect(dev))
- return -EINVAL;
+ ret = ksz_switch_detect(dev);
+ if (ret)
+ return ret;
info = ksz_lookup_info(dev->chip_id);
if (!info)
@@ -998,10 +1533,15 @@ int ksz_switch_register(struct ksz_device *dev,
/* Update the compatible info with the probed one */
dev->info = info;
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->info->dev_name, dev->chip_rev);
+
ret = ksz_check_device_id(dev);
if (ret)
return ret;
+ dev->dev_ops = dev->info->ops;
+
ret = dev->dev_ops->init(dev);
if (ret)
return ret;
@@ -1072,7 +1612,7 @@ int ksz_switch_register(struct ksz_device *dev,
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ksz_switch_register);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 8500eaedad67..d5dddb7ec045 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -25,6 +25,7 @@ struct ksz_port_mib {
u8 cnt_ptr;
u64 *counters;
struct rtnl_link_stats64 stats64;
+ struct ethtool_pause_stats pause_stats;
struct spinlock stats64_lock;
};
@@ -41,11 +42,19 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ const struct ksz_dev_ops *ops;
bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
u8 reg_mib_cnt;
+ const u16 *regs;
+ const u32 *masks;
+ const u8 *shifts;
+ int stp_ctrl_reg;
+ int broadcast_ctrl_reg;
+ int multicast_ctrl_reg;
+ int start_ctrl_reg;
bool supports_mii[KSZ_MAX_NUM_PORTS];
bool supports_rmii[KSZ_MAX_NUM_PORTS];
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
@@ -90,6 +99,7 @@ struct ksz_device {
/* chip specific data */
u32 chip_id;
+ u8 chip_rev;
int cpu_port; /* port connected to CPU */
int phy_port_cnt;
phy_interface_t compat_interface;
@@ -140,6 +150,64 @@ enum ksz_chip_id {
LAN9374_CHIP_ID = 0x00937400,
};
+enum ksz_regs {
+ REG_IND_CTRL_0,
+ REG_IND_DATA_8,
+ REG_IND_DATA_CHECK,
+ REG_IND_DATA_HI,
+ REG_IND_DATA_LO,
+ REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
+ P_FORCE_CTRL,
+ P_LINK_STATUS,
+ P_LOCAL_CTRL,
+ P_NEG_RESTART_CTRL,
+ P_REMOTE_STATUS,
+ P_SPEED_STATUS,
+ S_TAIL_TAG_CTRL,
+ P_STP_CTRL,
+ S_START_CTRL,
+ S_BROADCAST_CTRL,
+ S_MULTICAST_CTRL,
+};
+
+enum ksz_masks {
+ PORT_802_1P_REMAPPING,
+ SW_TAIL_TAG_ENABLE,
+ MIB_COUNTER_OVERFLOW,
+ MIB_COUNTER_VALID,
+ VLAN_TABLE_FID,
+ VLAN_TABLE_MEMBERSHIP,
+ VLAN_TABLE_VALID,
+ STATIC_MAC_TABLE_VALID,
+ STATIC_MAC_TABLE_USE_FID,
+ STATIC_MAC_TABLE_FID,
+ STATIC_MAC_TABLE_OVERRIDE,
+ STATIC_MAC_TABLE_FWD_PORTS,
+ DYNAMIC_MAC_TABLE_ENTRIES_H,
+ DYNAMIC_MAC_TABLE_MAC_EMPTY,
+ DYNAMIC_MAC_TABLE_NOT_READY,
+ DYNAMIC_MAC_TABLE_ENTRIES,
+ DYNAMIC_MAC_TABLE_FID,
+ DYNAMIC_MAC_TABLE_SRC_PORT,
+ DYNAMIC_MAC_TABLE_TIMESTAMP,
+ ALU_STAT_WRITE,
+ ALU_STAT_READ,
+};
+
+enum ksz_shifts {
+ VLAN_TABLE_MEMBERSHIP_S,
+ VLAN_TABLE,
+ STATIC_MAC_FWD_PORTS,
+ STATIC_MAC_FID,
+ DYNAMIC_MAC_ENTRIES_H,
+ DYNAMIC_MAC_ENTRIES,
+ DYNAMIC_MAC_FID,
+ DYNAMIC_MAC_TIMESTAMP,
+ DYNAMIC_MAC_SRC_PORT,
+ ALU_STAT_INDEX,
+};
+
struct alu_struct {
/* entry 1 */
u8 is_static:1;
@@ -160,6 +228,7 @@ struct alu_struct {
};
struct ksz_dev_ops {
+ int (*setup)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
@@ -167,71 +236,65 @@ struct ksz_dev_ops {
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
- int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp,
- u16 *entries);
- int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
void (*r_mib_stat64)(struct ksz_device *dev, int port);
+ int (*vlan_filtering)(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+ int (*vlan_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+ int (*vlan_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*mirror_add)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*mirror_del)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+ int (*fdb_add)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_del)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_dump)(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int (*mdb_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ int (*mdb_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ void (*get_caps)(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+ int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*max_mtu)(struct ksz_device *dev, int port);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- int (*shutdown)(struct ksz_device *dev);
- int (*detect)(struct ksz_device *dev);
+ void (*phylink_mac_config)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause);
+ void (*config_cpu_port)(struct dsa_switch *ds);
+ int (*enable_stp_addr)(struct ksz_device *dev);
+ int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops);
+int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
-int ksz8_switch_register(struct ksz_device *dev);
-int ksz9477_switch_register(struct ksz_device *dev);
-
-void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
void ksz_r_mib_stats64(struct ksz_device *dev, int port);
-void ksz_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *s);
-void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
extern const struct ksz_chip_data ksz_switch_chips[];
-/* Common DSA access functions */
-
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload,
- struct netlink_ext_ack *extack);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge);
-void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state, int reg);
-void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
-void ksz_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf);
-
/* Common register access functions */
static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
@@ -348,11 +411,51 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+static inline int is_lan937x(struct ksz_device *dev)
+{
+ return dev->chip_id == LAN9370_CHIP_ID ||
+ dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID ||
+ dev->chip_id == LAN9373_CHIP_ID ||
+ dev->chip_id == LAN9374_CHIP_ID;
+}
+
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
#define PORT_LEARN_DISABLE BIT(0)
+/* Switch ID Defines */
+#define REG_CHIP_ID0 0x00
+
+#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ87_FAMILY_ID 0x87
+#define KSZ88_FAMILY_ID 0x88
+
+#define KSZ8_PORT_STATUS_0 0x08
+#define KSZ8_PORT_FIBER_MODE BIT(7)
+
+#define SW_CHIP_ID_M GENMASK(7, 4)
+#define KSZ87_CHIP_ID_94 0x6
+#define KSZ87_CHIP_ID_95 0x9
+#define KSZ88_CHIP_ID_63 0x3
+
+#define SW_REV_ID_M GENMASK(7, 4)
+
+/* The driver sets broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames/s * 67 ms = 9969.6, truncated to 9969 */
+#define BROADCAST_STORM_VALUE 9969
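To make the storm-protection budget above concrete, here is a standalone sketch of how the 10% protection rate scales the per-interval frame count. The value * rate / 100 programming step is an assumption about what the driver does elsewhere; the program below is illustrative, not driver code.

#include <stdio.h>

int main(void)
{
        unsigned int value = 9969;      /* frames per 67 ms interval at 100% */
        unsigned int rate = 10;         /* BROADCAST_STORM_PROT_RATE */

        /* 148,800 frames/s * 67 ms / 1000 = 9969.6, truncated to 9969 */
        printf("storm limit = %u frames per interval\n", value * rate / 100);
        return 0;
}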
+
+#define BROADCAST_STORM_RATE_HI 0x07
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
+
+#define MULTICAST_STORM_DISABLE BIT(6)
+
+#define SW_START 0x01
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 961a74c359a8..4844830dca72 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Microchip KSZ8795 series register access through SPI
+ * Microchip KSZ series register access through SPI
*
* Copyright (C) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
@@ -14,7 +14,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include "ksz8.h"
#include "ksz_common.h"
#define KSZ8795_SPI_ADDR_SHIFT 12
@@ -25,29 +24,29 @@
#define KSZ8863_SPI_ADDR_ALIGN 8
#define KSZ8863_SPI_TURNAROUND_SHIFT 0
+#define KSZ9477_SPI_ADDR_SHIFT 24
+#define KSZ9477_SPI_ADDR_ALIGN 3
+#define KSZ9477_SPI_TURNAROUND_SHIFT 5
+
KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
-static int ksz8795_spi_probe(struct spi_device *spi)
+KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
+ KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+
+static int ksz_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config;
const struct ksz_chip_data *chip;
struct device *ddev = &spi->dev;
struct regmap_config rc;
struct ksz_device *dev;
- struct ksz8 *ksz8;
int i, ret = 0;
- ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = spi;
-
- dev = ksz_switch_alloc(&spi->dev, ksz8);
+ dev = ksz_switch_alloc(&spi->dev, spi);
if (!dev)
return -ENOMEM;
@@ -57,8 +56,12 @@ static int ksz8795_spi_probe(struct spi_device *spi)
if (chip->chip_id == KSZ8830_CHIP_ID)
regmap_config = ksz8863_regmap_config;
- else
+ else if (chip->chip_id == KSZ8795_CHIP_ID ||
+ chip->chip_id == KSZ8794_CHIP_ID ||
+ chip->chip_id == KSZ8765_CHIP_ID)
regmap_config = ksz8795_regmap_config;
+ else
+ regmap_config = ksz9477_regmap_config;
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
rc = regmap_config[i];
@@ -82,7 +85,7 @@ static int ksz8795_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = ksz8_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -93,7 +96,7 @@ static int ksz8795_spi_probe(struct spi_device *spi)
return 0;
}
-static void ksz8795_spi_remove(struct spi_device *spi)
+static void ksz_spi_remove(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
@@ -103,22 +106,22 @@ static void ksz8795_spi_remove(struct spi_device *spi)
spi_set_drvdata(spi, NULL);
}
-static void ksz8795_spi_shutdown(struct spi_device *spi)
+static void ksz_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
if (!dev)
return;
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
spi_set_drvdata(spi, NULL);
}
-static const struct of_device_id ksz8795_dt_ids[] = {
+static const struct of_device_id ksz_dt_ids[] = {
{
.compatible = "microchip,ksz8765",
.data = &ksz_switch_chips[KSZ8765]
@@ -139,34 +142,96 @@ static const struct of_device_id ksz8795_dt_ids[] = {
.compatible = "microchip,ksz8873",
.data = &ksz_switch_chips[KSZ8830]
},
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {
+ .compatible = "microchip,lan9370",
+ .data = &ksz_switch_chips[LAN9370]
+ },
+ {
+ .compatible = "microchip,lan9371",
+ .data = &ksz_switch_chips[LAN9371]
+ },
+ {
+ .compatible = "microchip,lan9372",
+ .data = &ksz_switch_chips[LAN9372]
+ },
+ {
+ .compatible = "microchip,lan9373",
+ .data = &ksz_switch_chips[LAN9373]
+ },
+ {
+ .compatible = "microchip,lan9374",
+ .data = &ksz_switch_chips[LAN9374]
+ },
{},
};
-MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
+MODULE_DEVICE_TABLE(of, ksz_dt_ids);
-static const struct spi_device_id ksz8795_spi_ids[] = {
+static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8765" },
{ "ksz8794" },
{ "ksz8795" },
{ "ksz8863" },
{ "ksz8873" },
+ { "ksz9477" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { "lan9370" },
+ { "lan9371" },
+ { "lan9372" },
+ { "lan9373" },
+ { "lan9374" },
{ },
};
-MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
-static struct spi_driver ksz8795_spi_driver = {
+static struct spi_driver ksz_spi_driver = {
.driver = {
- .name = "ksz8795-switch",
+ .name = "ksz-switch",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz8795_dt_ids),
+ .of_match_table = of_match_ptr(ksz_dt_ids),
},
- .id_table = ksz8795_spi_ids,
- .probe = ksz8795_spi_probe,
- .remove = ksz8795_spi_remove,
- .shutdown = ksz8795_spi_shutdown,
+ .id_table = ksz_spi_ids,
+ .probe = ksz_spi_probe,
+ .remove = ksz_spi_remove,
+ .shutdown = ksz_spi_shutdown,
};
-module_spi_driver(ksz8795_spi_driver);
+module_spi_driver(ksz_spi_driver);
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
+MODULE_ALIAS("spi:lan937x");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver");
+MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
new file mode 100644
index 000000000000..72ba9cb2fbc6
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip LAN937X dev ops header
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+
+#ifndef __LAN937X_CFG_H
+#define __LAN937X_CFG_H
+
+int lan937x_reset_switch(struct ksz_device *dev);
+int lan937x_setup(struct dsa_switch *ds);
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void lan937x_config_cpu_port(struct dsa_switch *ds);
+int lan937x_switch_init(struct ksz_device *dev);
+void lan937x_switch_exit(struct ksz_device *dev);
+void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void lan937x_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause);
+void lan937x_phylink_mac_config(struct ksz_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
new file mode 100644
index 000000000000..c29d175ca6f7
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip LAN937X switch driver main logic
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/math.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "lan937x_reg.h"
+#include "ksz_common.h"
+#include "lan937x.h"
+
+static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
+ u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
+{
+ u16 data16;
+ int ret;
+
+ /* Enable PHY access through SPI */
+ ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
+ if (ret < 0)
+ return ret;
+
+ /* Allow SPI access */
+ data16 |= VPHY_SPI_INDIRECT_ENABLE;
+
+ return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
+}
+
+static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
+{
+ u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
+ u16 temp;
+
+ /* get register address based on the logical port */
+ temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
+
+ return ksz_write16(dev, REG_VPHY_IND_ADDR__2, temp);
+}
+
+static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg,
+ u16 val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for an internal PHY port */
+ if (!dev->info->internal_phy[addr])
+ return -EOPNOTSUPP;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write the data to be written to the VPHY reg */
+ ret = ksz_write16(dev, REG_VPHY_IND_DATA__2, val);
+ if (ret < 0)
+ return ret;
+
+ /* Write the Write En and Busy bit */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2,
+ (VPHY_IND_WRITE | VPHY_IND_BUSY));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to write phy register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
+ u16 *val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for an internal PHY port; return 0xffff for a non-existent PHY */
+ if (!dev->info->internal_phy[addr])
+ return 0xffff;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write Read and Busy bit to start the transaction */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2, VPHY_IND_BUSY);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to read phy register\n");
+ return ret;
+ }
+
+ /* Read the VPHY register which has the PHY data */
+ return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
+}
+
+void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+{
+ lan937x_internal_phy_read(dev, addr, reg, data);
+}
+
+void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+{
+ lan937x_internal_phy_write(dev, addr, reg, val);
+}
+
+static int lan937x_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = lan937x_internal_phy_read(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int lan937x_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return lan937x_internal_phy_write(dev, addr, regnum, val);
+}
+
+static int lan937x_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(ds->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = lan937x_sw_mdio_read;
+ bus->write = lan937x_sw_mdio_write;
+ bus->name = "lan937x slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+int lan937x_reset_switch(struct ksz_device *dev)
+{
+ u32 data32;
+ int ret;
+
+ /* reset switch */
+ ret = lan937x_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto Aging */
+ ret = lan937x_cfg(dev, REG_SW_LUE_CTRL_1, SW_LINK_AUTO_AGING, true);
+ if (ret < 0)
+ return ret;
+
+ /* disable interrupts */
+ ret = ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
+ if (ret < 0)
+ return ret;
+
+ return ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+}
+
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ struct dsa_switch *ds = dev->ds;
+ u8 member;
+
+ /* enable tag tail for host port */
+ if (cpu_port)
+ lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_TAIL_TAG_ENABLE, true);
+
+ /* disable frame check length field */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH,
+ false);
+
+ /* set back pressure for half duplex */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
+ true);
+
+ /* enable 802.1p priority */
+ lan937x_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (!dev->info->internal_phy[port])
+ lan937x_port_cfg(dev, port, REG_PORT_XMII_CTRL_0,
+ PORT_MII_TX_FLOW_CTRL | PORT_MII_RX_FLOW_CTRL,
+ true);
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ dev->dev_ops->cfg_port_member(dev, port, member);
+}
+
+void lan937x_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dev->info->cpu_ports & (1 << dp->index)) {
+ dev->cpu_port = dp->index;
+
+ /* enable cpu port */
+ lan937x_port_setup(dev, dp->index, true);
+ }
+ }
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ ksz_port_stp_state_set(ds, dp->index, BR_STATE_DISABLED);
+ }
+}
+
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+{
+ struct dsa_switch *ds = dev->ds;
+ int ret;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += LAN937X_TAG_LEN;
+
+ if (new_mtu >= FR_MIN_SIZE)
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, true);
+ else
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, false);
+ if (ret < 0) {
+ dev_err(ds->dev, "failed to enable jumbo\n");
+ return ret;
+ }
+
+ /* Write the frame size in PORT_MAX_FR_SIZE register */
+ ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+
+ return 0;
+}
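As a sanity check of the frame-size math in lan937x_change_mtu(): for the default 1500-byte MTU on a user port, the wire frame comes out exactly at FR_MIN_SIZE, so the jumbo bit is already set at the default MTU. A small sketch with the constants (18 for VLAN_ETH_HLEN, 4 for ETH_FCS_LEN) written out:

#include <stdio.h>

int main(void)
{
        int new_mtu = 1500;             /* default MTU on a user port */
        int frame = new_mtu + 18 + 4;   /* + VLAN_ETH_HLEN + ETH_FCS_LEN */

        /* A CPU port would add LAN937X_TAG_LEN (2) on top, giving 1524. */
        printf("wire frame = %d octets, jumbo %s\n", frame,
               frame >= 1522 ? "enabled" : "disabled");    /* FR_MIN_SIZE */
        return 0;
}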
+
+static void lan937x_config_gbit(struct ksz_device *dev, bool gbit, u8 *data)
+{
+ if (gbit)
+ *data &= ~PORT_MII_NOT_1GBIT;
+ else
+ *data |= PORT_MII_NOT_1GBIT;
+}
+
+static void lan937x_mac_config(struct ksz_device *dev, int port,
+ phy_interface_t interface)
+{
+ u8 data8;
+
+ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
+
+ /* clear MII selection & set it based on interface later */
+ data8 &= ~PORT_MII_SEL_M;
+
+ /* configure MAC based on interface */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ lan937x_config_gbit(dev, false, &data8);
+ data8 |= PORT_MII_SEL;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ lan937x_config_gbit(dev, false, &data8);
+ data8 |= PORT_RMII_SEL;
+ break;
+ default:
+ dev_err(dev->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(interface), port);
+ return;
+ }
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
+}
+
+static void lan937x_config_interface(struct ksz_device *dev, int port,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u8 xmii_ctrl0, xmii_ctrl1;
+
+ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_0, &xmii_ctrl0);
+ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &xmii_ctrl1);
+
+ xmii_ctrl0 &= ~(PORT_MII_100MBIT | PORT_MII_FULL_DUPLEX |
+ PORT_MII_TX_FLOW_CTRL | PORT_MII_RX_FLOW_CTRL);
+
+ if (speed == SPEED_1000)
+ lan937x_config_gbit(dev, true, &xmii_ctrl1);
+ else
+ lan937x_config_gbit(dev, false, &xmii_ctrl1);
+
+ if (speed == SPEED_100)
+ xmii_ctrl0 |= PORT_MII_100MBIT;
+
+ if (duplex)
+ xmii_ctrl0 |= PORT_MII_FULL_DUPLEX;
+
+ if (tx_pause)
+ xmii_ctrl0 |= PORT_MII_TX_FLOW_CTRL;
+
+ if (rx_pause)
+ xmii_ctrl0 |= PORT_MII_RX_FLOW_CTRL;
+
+ ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_0, xmii_ctrl0);
+ ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, xmii_ctrl1);
+}
+
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_100FD;
+
+ if (dev->info->supports_rgmii[port]) {
+ /* MII/RMII/RGMII ports */
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10 | MAC_1000FD;
+ }
+}
+
+void lan937x_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ lan937x_config_interface(dev, port, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+void lan937x_phylink_mac_config(struct ksz_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(dev->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ lan937x_mac_config(dev, port, state->interface);
+}
+
+int lan937x_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ /* enable Indirect Access from SPI to the VPHY registers */
+ ret = lan937x_enable_spi_indirect_access(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable spi indirect access");
+ return ret;
+ }
+
+ ret = lan937x_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the mdio");
+ return ret;
+ }
+
+ /* VLAN awareness is a global setting; mixed VLAN filtering
+ * modes are not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Enable aggressive backoff for half duplex and UNH mode */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
+ (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
+ true);
+
+ /* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
+ * packets when 16 or more collisions occur
+ */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+
+ /* enable global MIB counter freeze function */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+
+ /* Disable the CLK125 and CLK25 outputs (1: disable, 0: enable) */
+ lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+
+ return 0;
+}
+
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
+void lan937x_switch_exit(struct ksz_device *dev)
+{
+ lan937x_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("Microchip LAN937x Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
new file mode 100644
index 000000000000..c187d0a3e7fa
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip LAN937X switch register definitions
+ * Copyright (C) 2019-2021 Microchip Technology Inc.
+ */
+#ifndef __LAN937X_REG_H
+#define __LAN937X_REG_H
+
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
+
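A quick hand check of PORT_CTRL_ADDR() above: each port occupies a 4 KiB register block, with port 0 mapped to block 1. A minimal sketch, with the expected results computed by hand:

#include <assert.h>

#define PORT_CTRL_ADDR(port, addr)      ((addr) | (((port) + 1) << 12))

int main(void)
{
        /* Port 0 lands in block 1 (0x1000), port 2 in block 3 (0x3000). */
        assert(PORT_CTRL_ADDR(0, 0x0020) == 0x1020);
        assert(PORT_CTRL_ADDR(2, 0x0301) == 0x3301);
        return 0;
}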
+/* 0 - Operation */
+#define REG_GLOBAL_CTRL_0 0x0007
+
+#define SW_PHY_REG_BLOCK BIT(7)
+#define SW_FAST_MODE BIT(3)
+#define SW_FAST_MODE_OVERRIDE BIT(2)
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+#define OVER_TEMP_INT BIT(28)
+#define HSR_INT BIT(27)
+#define PIO_INT BIT(26)
+#define POR_READY_INT BIT(25)
+
+#define SWITCH_INT_MASK \
+ (LUE_INT | TRIG_TS_INT | APB_TIMEOUT_INT | OVER_TEMP_INT | HSR_INT | \
+ PIO_INT | POR_READY_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_CLK125_ENB BIT(1)
+#define SW_CLK25_ENB BIT(0)
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_OVER_TEMP_ENABLE BIT(2)
+#define SW_RESET BIT(1)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_S 3
+#define SW_RESV_MCAST_ENABLE BIT(2)
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_SRC_ADDR_FILTER BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+#define SW_SHORT_IFG BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+/* 4 - LUE */
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define REG_SW_ALU_VAL_B 0x0424
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP 0xFF
+
+/* 7 - VPhy */
+#define REG_VPHY_IND_ADDR__2 0x075C
+#define REG_VPHY_IND_DATA__2 0x0760
+
+#define REG_VPHY_IND_CTRL__2 0x0768
+
+#define VPHY_IND_WRITE BIT(1)
+#define VPHY_IND_BUSY BIT(0)
+
+#define REG_VPHY_SPECIAL_CTRL__2 0x077C
+#define VPHY_SMI_INDIRECT_ENABLE BIT(15)
+#define VPHY_SW_LOOPBACK BIT(14)
+#define VPHY_MDIO_INTERNAL_ENABLE BIT(13)
+#define VPHY_SPI_INDIRECT_ENABLE BIT(12)
+#define VPHY_PORT_MODE_M 0x3
+#define VPHY_PORT_MODE_S 8
+#define VPHY_MODE_RGMII 0
+#define VPHY_MODE_MII_PHY 1
+#define VPHY_MODE_SGMII 2
+#define VPHY_MODE_RMII_PHY 3
+#define VPHY_SW_COLLISION_TEST BIT(7)
+#define VPHY_SPEED_DUPLEX_STAT_M 0x7
+#define VPHY_SPEED_DUPLEX_STAT_S 2
+#define VPHY_SPEED_1000 BIT(4)
+#define VPHY_SPEED_100 BIT(3)
+#define VPHY_FULL_DUPLEX BIT(2)
+
+/* Port Registers */
+
+/* 0 - Operation */
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_MAC_REMOTE_LOOPBACK BIT(6)
+#define PORT_K2L_INSERT_ENABLE BIT(5)
+#define PORT_K2L_DEBUG_ENABLE BIT(4)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+/* 1 - Phy */
+#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+
+/* 3 - xMII */
+#define REG_PORT_XMII_CTRL_0 0x0300
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_MII_FULL_DUPLEX BIT(6)
+#define PORT_MII_TX_FLOW_CTRL BIT(5)
+#define PORT_MII_100MBIT BIT(4)
+#define PORT_MII_RX_FLOW_CTRL BIT(3)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define REG_PORT_XMII_CTRL_1 0x0301
+#define PORT_MII_NOT_1GBIT BIT(6)
+#define PORT_MII_SEL_EDGE BIT(5)
+#define PORT_RGMII_ID_IG_ENABLE BIT(4)
+#define PORT_RGMII_ID_EG_ENABLE BIT(3)
+#define PORT_MII_MAC_MODE BIT(2)
+#define PORT_MII_SEL_M 0x3
+#define PORT_RGMII_SEL 0x0
+#define PORT_RMII_SEL 0x1
+#define PORT_MII_SEL 0x2
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+#define PORT_CHECK_LENGTH BIT(2)
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_PACKET BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define PORT_MAX_FR_SIZE 0x404
+#define FR_MIN_SIZE 1522
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+
+#define LAN937X_TAG_LEN 2
+
+#endif
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 0b49d243e00b..37b649501500 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -449,9 +449,6 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
- if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
- mode = chip->info->ops->port_max_speed_mode(port);
-
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
@@ -3280,28 +3277,51 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
+ phy_interface_t mode;
struct dsa_port *dp;
- int tx_amp;
+ int tx_amp, speed;
int err;
u16 reg;
chip->ports[port].chip = chip;
chip->ports[port].port = port;
+ dp = dsa_to_port(ds, port);
+
/* MAC Forcing register: don't force link, speed, duplex or flow control
* state to any particular values on physical ports, but force the CPU
* port and all DSA ports to their maximum bandwidth and full duplex.
*/
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ unsigned long caps = dp->pl_config.mac_capabilities;
+
+ if (chip->info->ops->port_max_speed_mode)
+ mode = chip->info->ops->port_max_speed_mode(port);
+ else
+ mode = PHY_INTERFACE_MODE_NA;
+
+ if (caps & MAC_10000FD)
+ speed = SPEED_10000;
+ else if (caps & MAC_5000FD)
+ speed = SPEED_5000;
+ else if (caps & MAC_2500FD)
+ speed = SPEED_2500;
+ else if (caps & MAC_1000)
+ speed = SPEED_1000;
+ else if (caps & MAC_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
- SPEED_MAX, DUPLEX_FULL,
- PAUSE_OFF,
- PHY_INTERFACE_MODE_NA);
- else
+ speed, DUPLEX_FULL,
+ PAUSE_OFF, mode);
+ } else {
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
PAUSE_ON,
PHY_INTERFACE_MODE_NA);
+ }
if (err)
return err;
@@ -3473,7 +3493,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
}
if (chip->info->ops->serdes_set_tx_amplitude) {
- dp = dsa_to_port(ds, port);
if (dp)
phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 5e03cfe50156..e693154cf803 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -488,14 +488,13 @@ struct mv88e6xxx_ops {
int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
int pause);
-#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
#define DUPLEX_UNFORCED -2
/* Port's MAC speed (in Mbps) and MAC duplex mode
*
* Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
- * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ * Use SPEED_UNFORCED for normal detection.
*
* Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
* or DUPLEX_UNFORCED for normal duplex detection.
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 795b3128768f..90c55f23b7c9 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -294,28 +294,10 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
return 0;
}
-/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex)
-{
- if (speed == SPEED_MAX)
- speed = 200;
-
- if (speed > 200)
- return -EOPNOTSUPP;
-
- /* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
- return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
- duplex);
-}
-
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
@@ -327,9 +309,6 @@ int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 100;
-
if (speed > 100)
return -EOPNOTSUPP;
@@ -341,9 +320,6 @@ int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 5 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -369,9 +345,6 @@ phy_interface_t mv88e6341_port_max_speed_mode(int port)
int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed > 1000)
return -EOPNOTSUPP;
@@ -386,9 +359,6 @@ int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -414,9 +384,6 @@ phy_interface_t mv88e6390_port_max_speed_mode(int port)
int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -445,9 +412,6 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
u16 reg, ctrl;
int err;
- if (speed == SPEED_MAX)
- speed = (port > 0 && port < 9) ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index e0a705d82019..cb04243f37c1 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -342,8 +342,6 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex);
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 220b0b027b55..08db9cf76818 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -6,6 +6,7 @@ config NET_DSA_MSCC_FELIX
depends on NET_VENDOR_FREESCALE
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3e07dc39007a..859196898a7d 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1553,9 +1553,18 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
ocelot_port_set_maxlen(ocelot, port, new_mtu);
+ mutex_lock(&ocelot->tas_lock);
+
+ if (ocelot_port->taprio && felix->info->tas_guard_bands_update)
+ felix->info->tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+
return 0;
}
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9e07eb7ee28d..deb8dde1fc19 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -53,6 +53,7 @@ struct felix_info {
struct phylink_link_state *state);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
+ void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
struct regmap *(*init_regmap)(struct ocelot *ocelot,
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index dd9085ae0922..61ed317602e7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -16,6 +16,7 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/pci.h>
+#include <linux/time.h>
#include "felix.h"
#define VSC9959_NUM_PORTS 6
@@ -1127,9 +1128,199 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
+/* Extract shortest continuous gate open intervals in ns for each traffic class
+ * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
+ * considered U64_MAX. If the gate is always closed, it is considered 0.
+ */
+static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
+ u64 min_gate_len[OCELOT_NUM_TC])
+{
+ struct tc_taprio_sched_entry *entry;
+ u64 gate_len[OCELOT_NUM_TC];
+ int tc, i, n;
+
+ /* Initialize arrays */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ min_gate_len[tc] = U64_MAX;
+ gate_len[tc] = 0;
+ }
+
+ /* If we don't have taprio, consider all gates as permanently open */
+ if (!taprio)
+ return;
+
+ n = taprio->num_entries;
+
+ /* Walk through the gate list twice so that open intervals which
+ * wrap around the end of the schedule are measured in full. Since
+ * we only track the minimum window size, visiting each entry twice
+ * does not change the result (if a gate never closes, min_gate_len
+ * remains U64_MAX).
+ */
+ for (i = 0; i < 2 * n; i++) {
+ entry = &taprio->entries[i % n];
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ if (entry->gate_mask & BIT(tc)) {
+ gate_len[tc] += entry->interval;
+ } else {
+ /* Gate closes now, record a potential new
+ * minimum and reinitialize length
+ */
+ if (min_gate_len[tc] > gate_len[tc])
+ min_gate_len[tc] = gate_len[tc];
+ gate_len[tc] = 0;
+ }
+ }
+ }
+}
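To make the two-pass scan above concrete, here is a standalone single-TC reduction of the same update rule, run on a made-up four-entry schedule (the entries and intervals are invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct entry {
        uint32_t gate_mask;
        uint64_t interval;      /* ns */
};

int main(void)
{
        struct entry sched[] = {
                { 0x1, 20000 }, /* TC0 open for 20 us */
                { 0x0, 5000 },  /* closed */
                { 0x1, 30000 }, /* open for 30 us */
                { 0x0, 5000 },  /* closed */
        };
        uint64_t min_len = UINT64_MAX, len = 0;
        int n = 4, i;

        /* Two passes, so an open run wrapping past entry 3 back to
         * entry 0 would still be summed as one window.
         */
        for (i = 0; i < 2 * n; i++) {
                if (sched[i % n].gate_mask & 0x1) {
                        len += sched[i % n].interval;
                } else {
                        if (len < min_len)
                                min_len = len;
                        len = 0;
                }
        }

        /* Open runs are 20000 and 30000 ns; the minimum is 20000. */
        printf("min gate length = %llu ns\n", (unsigned long long)min_len);
        return 0;
}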
+
+/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
+ * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
+ * values (the default value is 1518). Also, for traffic class windows smaller
+ * than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame
+ * dropping, such that these won't hang the port, as they will never be sent.
+ */
+static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u64 min_gate_len[OCELOT_NUM_TC];
+ int speed, picos_per_byte;
+ u64 needed_bit_time_ps;
+ u32 val, maxlen;
+ u8 tas_speed;
+ int tc;
+
+ lockdep_assert_held(&ocelot->tas_lock);
+
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
+
+ switch (tas_speed) {
+ case OCELOT_SPEED_10:
+ speed = SPEED_10;
+ break;
+ case OCELOT_SPEED_100:
+ speed = SPEED_100;
+ break;
+ case OCELOT_SPEED_1000:
+ speed = SPEED_1000;
+ break;
+ case OCELOT_SPEED_2500:
+ speed = SPEED_2500;
+ break;
+ default:
+ return;
+ }
+
+ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG);
+ /* MAXLEN_CFG accounts automatically for VLAN. We need to include it
+ * manually in the bit time calculation, plus the preamble and SFD.
+ */
+ maxlen = val + 2 * VLAN_HLEN;
+ /* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+ * 4 octets FCS, 12 octets IFG.
+ */
+ needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
+
+ dev_dbg(ocelot->dev,
+ "port %d: max frame size %d needs %llu ps at speed %d\n",
+ port, maxlen, needed_bit_time_ps, speed);
+
+ vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 max_sdu;
+
+ if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
+ min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+ /* Setting QMAXSDU_CFG to 0 disables oversized frame
+ * dropping.
+ */
+ max_sdu = 0;
+ dev_dbg(ocelot->dev,
+ "port %d tc %d min gate len %llu"
+ ", sending all frames\n",
+ port, tc, min_gate_len[tc]);
+ } else {
+ /* If traffic class doesn't support a full MTU sized
+ * frame, make sure to enable oversize frame dropping
+ * for frames larger than the smallest that would fit.
+ */
+ max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
+ picos_per_byte);
+ /* A TC gate may be completely closed, which is a
+ * special case where all packets are oversized.
+ * Any limit smaller than 64 octets accomplishes this
+ */
+ if (!max_sdu)
+ max_sdu = 1;
+ /* Take L1 overhead into account, but don't let the
+ * subtraction bring max_sdu down to 0. Here we use 20
+ * because QSYS_MAXSDU_CFG_* already counts the 4 FCS
+ * octets as part of the packet size.
+ */
+ if (max_sdu > 20)
+ max_sdu -= 20;
+ dev_info(ocelot->dev,
+ "port %d tc %d min gate length %llu"
+ " ns not enough for max frame size %d at %d"
+ " Mbps, dropping frames over %d"
+ " octets including FCS\n",
+ port, tc, min_gate_len[tc], maxlen, speed,
+ max_sdu);
+ }
+
+ /* ocelot_write_rix is a macro that concatenates
+ * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
+ * the writes to each traffic class
+ */
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+ }
+
+ ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+}
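A hand computation of the threshold logic above for a port at 1 Gbps with the default MAXLEN_CFG of 1518 octets; the 10 us gate window is an invented example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int speed = 1000;                               /* Mbps */
        int picos_per_byte = (1000000 * 8) / speed;     /* 8000 ps/byte */
        uint32_t maxlen = 1518 + 2 * 4;                 /* + 2 * VLAN_HLEN */
        uint64_t needed_ps = (uint64_t)(maxlen + 24) * picos_per_byte;
        uint64_t gate_ns = 10000;                       /* example 10 us window */
        uint32_t max_sdu = gate_ns * 1000 / picos_per_byte;

        if (max_sdu > 20)
                max_sdu -= 20;  /* L1 overhead, as in the driver */

        /* 1550 bytes * 8000 ps = 12.4 us > 10 us: oversize dropping kicks
         * in, clamping frames to 1230 octets including FCS.
         */
        printf("needed %llu ps, max_sdu %u octets\n",
               (unsigned long long)needed_ps, max_sdu);
        return 0;
}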
+
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
u32 speed)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 tas_speed;
switch (speed) {
@@ -1154,6 +1345,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
+
+ mutex_lock(&ocelot->tas_lock);
+
+ if (ocelot_port->taprio)
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
}
static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
@@ -1204,12 +1402,14 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
mutex_lock(&ocelot->tas_lock);
if (!taprio->enable) {
- ocelot_rmw_rix(ocelot,
- QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
- QSYS_TAG_CONFIG_ENABLE |
- QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
+ taprio_offload_free(ocelot_port->taprio);
+ ocelot_port->taprio = NULL;
+
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
mutex_unlock(&ocelot->tas_lock);
return 0;
}
@@ -1258,8 +1458,6 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M,
QSYS_TAG_CONFIG, port);
- ocelot_port->base_time = taprio->base_time;
-
vsc9959_new_base_time(ocelot, taprio->base_time,
taprio->cycle_time, &base_ts);
ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
@@ -1282,6 +1480,11 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
+ if (ret)
+ goto err;
+
+ ocelot_port->taprio = taprio_offload_get(taprio);
+ vsc9959_tas_guard_bands_update(ocelot, port);
err:
mutex_unlock(&ocelot->tas_lock);
@@ -1291,17 +1494,18 @@ err:
static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
{
+ struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
- u64 cycletime;
int port;
u32 val;
mutex_lock(&ocelot->tas_lock);
for (port = 0; port < ocelot->num_phys_ports; port++) {
- val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
- if (!(val & QSYS_TAG_CONFIG_ENABLE))
+ ocelot_port = ocelot->ports[port];
+ taprio = ocelot_port->taprio;
+ if (!taprio)
continue;
ocelot_rmw(ocelot,
@@ -1309,17 +1513,12 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
QSYS_TAS_PARAM_CFG_CTRL);
- ocelot_rmw_rix(ocelot,
- QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
- QSYS_TAG_CONFIG_ENABLE |
- QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ /* Disable time-aware shaper */
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
- cycletime = ocelot_read(ocelot, QSYS_PARAM_CFG_REG_4);
- ocelot_port = ocelot->ports[port];
-
- vsc9959_new_base_time(ocelot, ocelot_port->base_time,
- cycletime, &base_ts);
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
@@ -1334,11 +1533,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);
- ocelot_rmw_rix(ocelot,
- QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF) |
+ /* Re-enable time-aware shaper */
+ ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG_ENABLE,
- QSYS_TAG_CONFIG_ENABLE |
- QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
QSYS_TAG_CONFIG, port);
}
mutex_unlock(&ocelot->tas_lock);
@@ -1956,6 +2153,8 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
struct felix_stream_filter_counters *counters)
{
+ mutex_lock(&ocelot->stats_lock);
+
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
SYS_STAT_CFG_STAT_VIEW_M,
SYS_STAT_CFG);
@@ -1970,6 +2169,8 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
SYS_STAT_CFG_STAT_VIEW(index) |
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
+
+ mutex_unlock(&ocelot->stats_lock);
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
@@ -2307,6 +2508,7 @@ static const struct felix_info felix_info_vsc9959 = {
.port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
.init_regmap = ocelot_regmap_init,
};
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index f23ce56fa591..0796b7cf8cae 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -231,6 +231,7 @@ struct ar9331_sw_port {
int idx;
struct delayed_work mib_read;
struct rtnl_link_stats64 stats;
+ struct ethtool_pause_stats pause_stats;
struct spinlock stats_lock;
};
@@ -604,6 +605,7 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void ar9331_read_stats(struct ar9331_sw_port *port)
{
struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct ethtool_pause_stats *pstats = &port->pause_stats;
struct rtnl_link_stats64 *stats = &port->stats;
struct ar9331_sw_stats_raw raw;
int ret;
@@ -644,6 +646,9 @@ static void ar9331_read_stats(struct ar9331_sw_port *port)
stats->multicast += raw.rxmulti;
stats->collisions += raw.txcollision;
+ pstats->tx_pause_frames += raw.txpause;
+ pstats->rx_pause_frames += raw.rxpause;
+
spin_unlock(&port->stats_lock);
}
@@ -668,6 +673,17 @@ static void ar9331_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(pause_stats, &p->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&p->stats_lock);
+}
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
@@ -677,6 +693,7 @@ static const struct dsa_switch_ops ar9331_sw_ops = {
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
+ .get_pause_stats = ar9331_get_pause_stats,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 2727d3169c25..1cbb05b0323f 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -2334,6 +2334,7 @@ static int
qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct qca8k_priv *priv = ds->priv;
+ int ret;
/* We only have a general MTU setting.
* DSA always sets the CPU port's MTU to the largest MTU of the slave
@@ -2344,8 +2345,27 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
if (!dsa_is_cpu_port(ds, port))
return 0;
+ /* To change MAX_FRAME_SIZE, the CPU ports must be disabled or
+ * the switch panics. Turn off both CPU ports before applying
+ * the new value to prevent this.
+ */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
/* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
}
static int
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 04408e11402a..ec58d0e80a70 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -15,7 +15,7 @@
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
-#define QCA8K_ETHERNET_TIMEOUT 100
+#define QCA8K_ETHERNET_TIMEOUT 5
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
new file mode 100644
index 000000000000..0744e8162e1d
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -0,0 +1,1064 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <net/dsa.h>
+
+#include "rzn1_a5psw.h"
+
+struct a5psw_stats {
+ u16 offset;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define STAT_DESC(_offset) { \
+ .offset = A5PSW_##_offset, \
+ .name = __stringify(_offset), \
+}
+
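For readers unfamiliar with the kernel's __stringify(), STAT_DESC(ifInErrors) below expands to { .offset = A5PSW_ifInErrors, .name = "ifInErrors" }. A minimal userspace sketch of the stringification step:

#include <stdio.h>

/* Two-level expansion, as the kernel's __stringify() does */
#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

int main(void)
{
        printf("name = \"%s\"\n", __stringify(ifInErrors));
        return 0;
}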
+static const struct a5psw_stats a5psw_stats[] = {
+ STAT_DESC(aFramesTransmittedOK),
+ STAT_DESC(aFramesReceivedOK),
+ STAT_DESC(aFrameCheckSequenceErrors),
+ STAT_DESC(aAlignmentErrors),
+ STAT_DESC(aOctetsTransmittedOK),
+ STAT_DESC(aOctetsReceivedOK),
+ STAT_DESC(aTxPAUSEMACCtrlFrames),
+ STAT_DESC(aRxPAUSEMACCtrlFrames),
+ STAT_DESC(ifInErrors),
+ STAT_DESC(ifOutErrors),
+ STAT_DESC(ifInUcastPkts),
+ STAT_DESC(ifInMulticastPkts),
+ STAT_DESC(ifInBroadcastPkts),
+ STAT_DESC(ifOutDiscards),
+ STAT_DESC(ifOutUcastPkts),
+ STAT_DESC(ifOutMulticastPkts),
+ STAT_DESC(ifOutBroadcastPkts),
+ STAT_DESC(etherStatsDropEvents),
+ STAT_DESC(etherStatsOctets),
+ STAT_DESC(etherStatsPkts),
+ STAT_DESC(etherStatsUndersizePkts),
+ STAT_DESC(etherStatsOversizePkts),
+ STAT_DESC(etherStatsPkts64Octets),
+ STAT_DESC(etherStatsPkts65to127Octets),
+ STAT_DESC(etherStatsPkts128to255Octets),
+ STAT_DESC(etherStatsPkts256to511Octets),
+ STAT_DESC(etherStatsPkts1024to1518Octets),
+ STAT_DESC(etherStatsPkts1519toXOctets),
+ STAT_DESC(etherStatsJabbers),
+ STAT_DESC(etherStatsFragments),
+ STAT_DESC(VLANReceived),
+ STAT_DESC(VLANTransmitted),
+ STAT_DESC(aDeferred),
+ STAT_DESC(aMultipleCollisions),
+ STAT_DESC(aSingleCollisions),
+ STAT_DESC(aLateCollisions),
+ STAT_DESC(aExcessiveCollisions),
+ STAT_DESC(aCarrierSenseErrors),
+};
+
+static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
+{
+ writel(value, a5psw->base + offset);
+}
+
+static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
+{
+ return readl(a5psw->base + offset);
+}
+
+static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&a5psw->reg_lock);
+
+ reg = a5psw_reg_readl(a5psw, offset);
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, offset, reg);
+
+ spin_unlock(&a5psw->reg_lock);
+}
+
+static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RZN1_A5PSW;
+}
+
+static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
+ bool enable)
+{
+ u32 rx_match = 0;
+
+ if (enable)
+ rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
+
+ a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
+ A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
+}
+
+static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
+{
+ /* Enable "management forward" pattern matching, this will forward
+ * packets from this port only towards the management port and thus
+ * isolate the port.
+ */
+ a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
+}
+
+static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 port_ena = 0;
+
+ if (enable)
+ port_ena |= A5PSW_PORT_ENA_TX_RX(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
+ port_ena);
+}
+
+static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
+{
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
+ !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret)
+ dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
+
+ return ret;
+}
+
+static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
+{
+ u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+ a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ mutex_unlock(&a5psw->lk_lock);
+}
+
+static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
+ bool authorize)
+{
+ u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
+
+ if (authorize)
+ reg |= A5PSW_AUTH_PORT_AUTHORIZED;
+ else
+ reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
+
+ a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
+}
+
+static void a5psw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, false);
+ a5psw_port_enable_set(a5psw, port, false);
+}
+
+static int a5psw_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, true);
+ a5psw_port_enable_set(a5psw, port, true);
+
+ return 0;
+}
+
+static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
+ a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
+
+ return 0;
+}
+
+static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return A5PSW_MAX_MTU;
+}
+
+static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *intf = config->supported_interfaces;
+
+ config->mac_capabilities = MAC_1000FD;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* GMII is used internally and GMAC2 is connected to the switch
+ * using 1000 Mbps full-duplex mode only (cf. the Ethernet manual)
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, intf);
+ } else {
+ config->mac_capabilities |= MAC_100 | MAC_10;
+ phy_interface_set_rgmii(intf);
+ __set_bit(PHY_INTERFACE_MODE_RMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_MII, intf);
+ }
+}
+
+static struct phylink_pcs *
+a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct a5psw *a5psw = ds->priv;
+
+ if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
+ return a5psw->pcs[port];
+
+ return NULL;
+}
+
+static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct a5psw *a5psw = ds->priv;
+ u32 cmd_cfg;
+
+ cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
+ cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
+ A5PSW_CMD_CFG_TX_CRC_APPEND;
+ struct a5psw *a5psw = ds->priv;
+
+ if (speed == SPEED_1000)
+ cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
+
+ if (duplex == DUPLEX_HALF)
+ cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
+
+ cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
+
+ if (!rx_pause)
+ cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
+
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned long rate;
+ u64 max, tmp;
+ u32 agetime;
+
+ rate = clk_get_rate(a5psw->clk);
+ max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
+ rate) * 1000;
+ if (msecs > max)
+ return -EINVAL;
+
+ tmp = div_u64(rate, MSEC_PER_SEC);
+ agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
+
+ return 0;
+}
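
The A5PSW_LK_AGETIME register counts in units of 1024 * A5PSW_TABLE_ENTRIES switch-clock cycles. As a worked example, assuming (for illustration only) a 125 MHz switch clock and the 300-second bridge default ageing time:

	/* one register unit = 1024 * 8192 / 125000000         ~= 67.1 ms
	 * tmp               = 125000000 / MSEC_PER_SEC         = 125000
	 * agetime           = 300000 * 125000 / (1024 * 8192) ~= 4470
	 * check: 4470 units * 67.1 ms                          ~= 300 s
	 */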
+
+static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ bool set)
+{
+ u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
+ A5PSW_MCAST_DEF_MASK};
+ int i;
+
+ if (set)
+ a5psw->bridged_ports |= BIT(port);
+ else
+ a5psw->bridged_ports &= ~BIT(port);
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++)
+ a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
+}
+
+static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ /* We only support 1 bridge device */
+ if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Forwarding offload supported for a single bridge");
+ return -EOPNOTSUPP;
+ }
+
+ a5psw->br_dev = bridge.dev;
+ a5psw_flooding_set_resolution(a5psw, port, true);
+ a5psw_port_mgmtfwd_set(a5psw, port, false);
+
+ return 0;
+}
+
+static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_flooding_set_resolution(a5psw, port, false);
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+
+ /* No more ports bridged */
+ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
+ a5psw->br_dev = NULL;
+}
+
+static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+ struct a5psw *a5psw = ds->priv;
+ u32 reg = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_LISTENING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ break;
+ case BR_STATE_LEARNING:
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ break;
+ }
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_fdb_flush(a5psw, port);
+}
+
+static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
+ u16 *entry)
+{
+ u32 ctrl;
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
+
+ ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
+ ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ if (ret)
+ return ret;
+
+ *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
+
+ return 0;
+}
+
+static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool inc_learncount = false;
+ int ret = 0;
+ u16 entry;
+ u32 reg;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+ lk_data.entry.port_mask = BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ /* Set the value to be written in the lookup table */
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ if (!lk_data.entry.valid) {
+ inc_learncount = true;
+ /* port_mask reads back as 0x1f when the entry is not valid, clear it */
+ lk_data.entry.port_mask = 0;
+ lk_data.entry.prio = 0;
+ }
+
+ lk_data.entry.port_mask |= BIT(port);
+ lk_data.entry.is_static = 1;
+ lk_data.entry.valid = 1;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ if (inc_learncount) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool clear = false;
+ u16 entry;
+ u32 reg;
+ int ret;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+
+ /* Our hardware does not associate any VID with FDB entries, so if
+ * two entries were added for the same MAC but with different VIDs,
+ * deleting the first one would also delete the second one. Since
+ * there is unfortunately nothing we can do about that, do not
+ * return an error.
+ */
+ if (!lk_data.entry.valid)
+ goto lk_unlock;
+
+ lk_data.entry.port_mask &= ~BIT(port);
+ /* If there is no more port in the mask, clear the entry */
+ if (lk_data.entry.port_mask == 0)
+ clear = true;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = entry;
+ if (clear)
+ reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
+ else
+ reg |= A5PSW_LK_ADDR_CTRL_WRITE;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ /* Decrement LEARNCOUNT */
+ if (clear) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data;
+ int i = 0, ret = 0;
+ u32 reg;
+
+ mutex_lock(&a5psw->lk_lock);
+
+ for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
+ reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto out_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ /* If entry is not valid or does not contain the port, skip */
+ if (!lk_data.entry.valid ||
+ !(lk_data.entry.port_mask & BIT(port)))
+ continue;
+
+ lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
+
+ ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
+ if (ret)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
+{
+ u32 reg_lo, reg_hi;
+
+ reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
+ /* A5PSW_STATS_HIWORD is latched on stat read */
+ reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
+
+ return ((u64)reg_hi << 32) | reg_lo;
+}
+
+static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ unsigned int u;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
+ memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
+}
+
+static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(a5psw_stats);
+}
+
+static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
+ mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
+ mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
+ mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
+ mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
+ mac_stats->AlignmentErrors = RD(aAlignmentErrors);
+ mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
+ mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
+ mac_stats->LateCollisions = RD(aLateCollisions);
+ mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
+ mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
+ mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
+ mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
+ mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
+ mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
+ mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
+ mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
+ mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
+ mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
+#undef RD
+}
+
+static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, A5PSW_MAX_MTU },
+ {}
+};
+
+static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
+ rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
+ rmon_stats->fragments = RD(etherStatsFragments);
+ rmon_stats->jabbers = RD(etherStatsJabbers);
+ rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
+ rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
+ rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
+ rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
+ rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
+ rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
+ rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
+#undef RD
+
+ *ranges = a5psw_rmon_ranges;
+}
+
+static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+ u64 stat;
+
+ stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesTransmitted = stat;
+ stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesReceived = stat;
+}
+
+static int a5psw_setup(struct dsa_switch *ds)
+{
+ struct a5psw *a5psw = ds->priv;
+ int port, vlan, ret;
+ struct dsa_port *dp;
+ u32 reg;
+
+ /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dp->index != A5PSW_CPU_PORT) {
+ dev_err(a5psw->dev, "Invalid CPU port\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Configure management port */
+ reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
+
+ /* Set pattern 0 to forward all frames to the mgmt port */
+ a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
+ A5PSW_PATTERN_CTRL_MGMTFWD);
+
+ /* Enable port tagging */
+ reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
+ reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
+
+ /* Enable normal switch operation */
+ reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
+ A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
+ A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
+ a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
+ !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret) {
+ dev_err(a5psw->dev, "Failed to clear lookup table\n");
+ return ret;
+ }
+
+ /* Reset learn count to 0 */
+ reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+
+ /* Clear VLAN resource table */
+ reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
+ for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
+
+ /* Reset all ports */
+ dsa_switch_for_each_port(dp, ds) {
+ port = dp->index;
+
+ /* Reset the port */
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
+ A5PSW_CMD_CFG_SW_RESET);
+
+ /* Enable only CPU port */
+ a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
+
+ if (dsa_port_is_unused(dp))
+ continue;
+
+ /* Enable egress flooding for CPU port */
+ if (dsa_port_is_cpu(dp))
+ a5psw_flooding_set_resolution(a5psw, port, true);
+
+ /* Enable management forward only for user ports */
+ if (dsa_port_is_user(dp))
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops a5psw_switch_ops = {
+ .get_tag_protocol = a5psw_get_tag_protocol,
+ .setup = a5psw_setup,
+ .port_disable = a5psw_port_disable,
+ .port_enable = a5psw_port_enable,
+ .phylink_get_caps = a5psw_phylink_get_caps,
+ .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .phylink_mac_link_down = a5psw_phylink_mac_link_down,
+ .phylink_mac_link_up = a5psw_phylink_mac_link_up,
+ .port_change_mtu = a5psw_port_change_mtu,
+ .port_max_mtu = a5psw_port_max_mtu,
+ .get_sset_count = a5psw_get_sset_count,
+ .get_strings = a5psw_get_strings,
+ .get_ethtool_stats = a5psw_get_ethtool_stats,
+ .get_eth_mac_stats = a5psw_get_eth_mac_stats,
+ .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
+ .get_rmon_stats = a5psw_get_rmon_stats,
+ .set_ageing_time = a5psw_set_ageing_time,
+ .port_bridge_join = a5psw_port_bridge_join,
+ .port_bridge_leave = a5psw_port_bridge_leave,
+ .port_stp_state_set = a5psw_port_stp_state_set,
+ .port_fast_age = a5psw_port_fast_age,
+ .port_fdb_add = a5psw_port_fdb_add,
+ .port_fdb_del = a5psw_port_fdb_del,
+ .port_fdb_dump = a5psw_port_fdb_dump,
+};
+
+static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
+{
+ u32 status;
+ int err;
+
+ err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
+ !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
+ 1000 * USEC_PER_MSEC);
+ if (err)
+ dev_err(a5psw->dev, "MDIO command timeout\n");
+
+ return err;
+}
+
+static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd, status;
+ int ret;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = A5PSW_MDIO_COMMAND_READ;
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+
+ ret = a5psw_mdio_wait_busy(a5psw);
+ if (ret)
+ return ret;
+
+ ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
+
+ status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
+ if (status & A5PSW_MDIO_CFG_STATUS_READERR)
+ return -EIO;
+
+ return ret;
+}
+
+static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
+ u16 phy_data)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
+
+ return a5psw_mdio_wait_busy(a5psw);
+}
+
+static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
+{
+ unsigned long rate;
+ unsigned long div;
+ u32 cfgstatus;
+
+ rate = clk_get_rate(a5psw->hclk);
+ div = ((rate / mdio_freq) / 2);
+ if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
+ div < A5PSW_MDIO_CLK_DIV_MIN) {
+ dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
+ return -ERANGE;
+ }
+
+ cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
+
+ return 0;
+}
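
For example, assuming a 125 MHz hclk (for illustration only) and the default 2.5 MHz MDIO bus frequency (A5PSW_MDIO_DEF_FREQ):

	/* div = (125000000 / 2500000) / 2 = 25, which satisfies
	 * A5PSW_MDIO_CLK_DIV_MIN (5) <= div <= FIELD_MAX(CLKDIV) (511)
	 */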
+
+static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
+{
+ struct device *dev = a5psw->dev;
+ struct mii_bus *bus;
+ u32 mdio_freq;
+ int ret;
+
+ if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
+ mdio_freq = A5PSW_MDIO_DEF_FREQ;
+
+ ret = a5psw_mdio_config(a5psw, mdio_freq);
+ if (ret)
+ return ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "a5psw_mdio";
+ bus->read = a5psw_mdio_read;
+ bus->write = a5psw_mdio_write;
+ bus->priv = a5psw;
+ bus->parent = dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ a5psw->mii_bus = bus;
+
+ return devm_of_mdiobus_register(dev, bus, node);
+}
+
+static void a5psw_pcs_free(struct a5psw *a5psw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
+ if (a5psw->pcs[i])
+ miic_destroy(a5psw->pcs[i]);
+ }
+}
+
+static int a5psw_pcs_get(struct a5psw *a5psw)
+{
+ struct device_node *ports, *port, *pcs_node;
+ struct phylink_pcs *pcs;
+ int ret;
+ u32 reg;
+
+ ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ pcs_node = of_parse_phandle(port, "pcs-handle", 0);
+ if (!pcs_node)
+ continue;
+
+ if (of_property_read_u32(port, "reg", &reg)) {
+ ret = -EINVAL;
+ goto free_pcs;
+ }
+
+ if (reg >= ARRAY_SIZE(a5psw->pcs)) {
+ ret = -ENODEV;
+ goto free_pcs;
+ }
+
+ pcs = miic_create(a5psw->dev, pcs_node);
+ if (IS_ERR(pcs)) {
+ dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
+ reg);
+ ret = PTR_ERR(pcs);
+ goto free_pcs;
+ }
+
+ a5psw->pcs[reg] = pcs;
+ of_node_put(pcs_node);
+ }
+ of_node_put(ports);
+
+ return 0;
+
+free_pcs:
+ of_node_put(pcs_node);
+ of_node_put(port);
+ of_node_put(ports);
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio;
+ struct dsa_switch *ds;
+ struct a5psw *a5psw;
+ int ret;
+
+ a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
+ if (!a5psw)
+ return -ENOMEM;
+
+ a5psw->dev = dev;
+ mutex_init(&a5psw->lk_lock);
+ spin_lock_init(&a5psw->reg_lock);
+ a5psw->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(a5psw->base))
+ return PTR_ERR(a5psw->base);
+
+ ret = a5psw_pcs_get(a5psw);
+ if (ret)
+ return ret;
+
+ a5psw->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(a5psw->hclk)) {
+ dev_err(dev, "failed get hclk clock\n");
+ ret = PTR_ERR(a5psw->hclk);
+ goto free_pcs;
+ }
+
+ a5psw->clk = devm_clk_get(dev, "clk");
+ if (IS_ERR(a5psw->clk)) {
+ dev_err(dev, "failed get clk_switch clock\n");
+ ret = PTR_ERR(a5psw->clk);
+ goto free_pcs;
+ }
+
+ ret = clk_prepare_enable(a5psw->clk);
+ if (ret)
+ goto free_pcs;
+
+ ret = clk_prepare_enable(a5psw->hclk);
+ if (ret)
+ goto clk_disable;
+
+ mdio = of_get_child_by_name(dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ ret = a5psw_probe_mdio(a5psw, mdio);
+ if (ret) {
+ of_node_put(mdio);
+ dev_err(dev, "Failed to register MDIO: %d\n", ret);
+ goto hclk_disable;
+ }
+ }
+
+ of_node_put(mdio);
+
+ ds = &a5psw->ds;
+ ds->dev = dev;
+ ds->num_ports = A5PSW_PORTS_NUM;
+ ds->ops = &a5psw_switch_ops;
+ ds->priv = a5psw;
+
+ platform_set_drvdata(pdev, a5psw);
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err(dev, "Failed to register DSA switch: %d\n", ret);
+ goto hclk_disable;
+ }
+
+ return 0;
+
+hclk_disable:
+ clk_disable_unprepare(a5psw->hclk);
+clk_disable:
+ clk_disable_unprepare(a5psw->clk);
+free_pcs:
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_remove(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return 0;
+
+ dsa_unregister_switch(&a5psw->ds);
+ a5psw_pcs_free(a5psw);
+ clk_disable_unprepare(a5psw->hclk);
+ clk_disable_unprepare(a5psw->clk);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void a5psw_shutdown(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return;
+
+ dsa_switch_shutdown(&a5psw->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id a5psw_of_mtable[] = {
+ { .compatible = "renesas,rzn1-a5psw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
+
+static struct platform_driver a5psw_driver = {
+ .driver = {
+ .name = "rzn1_a5psw",
+ .of_match_table = of_match_ptr(a5psw_of_mtable),
+ },
+ .probe = a5psw_probe,
+ .remove = a5psw_remove,
+ .shutdown = a5psw_shutdown,
+};
+module_platform_driver(a5psw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
new file mode 100644
index 000000000000..c67abd49c013
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <net/dsa.h>
+
+#define A5PSW_REVISION 0x0
+#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
+
+#define A5PSW_PORT_ENA 0x8
+#define A5PSW_PORT_ENA_RX_SHIFT 16
+#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
+ BIT(port))
+#define A5PSW_UCAST_DEF_MASK 0xC
+
+#define A5PSW_VLAN_VERIFY 0x10
+#define A5PSW_VLAN_VERI_SHIFT 0
+#define A5PSW_VLAN_DISC_SHIFT 16
+
+#define A5PSW_BCAST_DEF_MASK 0x14
+#define A5PSW_MCAST_DEF_MASK 0x18
+
+#define A5PSW_INPUT_LEARN 0x1C
+#define A5PSW_INPUT_LEARN_DIS(p) BIT((p) + 16)
+#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
+
+#define A5PSW_MGMT_CFG 0x20
+#define A5PSW_MGMT_CFG_DISCARD BIT(7)
+
+#define A5PSW_MODE_CFG 0x24
+#define A5PSW_MODE_STATS_RESET BIT(31)
+
+#define A5PSW_VLAN_IN_MODE 0x28
+#define A5PSW_VLAN_IN_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_IN_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_IN_MODE_PORT_SHIFT(port))
+#define A5PSW_VLAN_IN_MODE_SINGLE_PASSTHROUGH 0x0
+#define A5PSW_VLAN_IN_MODE_SINGLE_REPLACE 0x1
+#define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2
+
+#define A5PSW_VLAN_OUT_MODE 0x2C
+#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << ((port) * 2))
+#define A5PSW_VLAN_OUT_MODE_DIS 0x0
+#define A5PSW_VLAN_OUT_MODE_STRIP 0x1
+#define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2
+#define A5PSW_VLAN_OUT_MODE_TRANSPARENT 0x3
+
+#define A5PSW_VLAN_IN_MODE_ENA 0x30
+#define A5PSW_VLAN_TAG_ID 0x34
+
+#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port))
+#define A5PSW_AUTH_PORT_AUTHORIZED BIT(0)
+
+#define A5PSW_VLAN_RES(entry) (0x280 + 4 * (entry))
+#define A5PSW_VLAN_RES_WR_PORTMASK BIT(30)
+#define A5PSW_VLAN_RES_WR_TAGMASK BIT(29)
+#define A5PSW_VLAN_RES_RD_TAGMASK BIT(28)
+#define A5PSW_VLAN_RES_ID GENMASK(16, 5)
+#define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0)
+
+#define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port))
+#define A5PSW_RXMATCH_CONFIG_PATTERN(p) BIT(p)
+
+#define A5PSW_PATTERN_CTRL(p) (0x3eb0 + 4 * (p))
+#define A5PSW_PATTERN_CTRL_MGMTFWD BIT(1)
+
+#define A5PSW_LK_CTRL 0x400
+#define A5PSW_LK_ADDR_CTRL_BLOCKING BIT(0)
+#define A5PSW_LK_ADDR_CTRL_LEARNING BIT(1)
+#define A5PSW_LK_ADDR_CTRL_AGEING BIT(2)
+#define A5PSW_LK_ADDR_CTRL_ALLOW_MIGR BIT(3)
+#define A5PSW_LK_ADDR_CTRL_CLEAR_TABLE BIT(6)
+
+#define A5PSW_LK_ADDR_CTRL 0x408
+#define A5PSW_LK_ADDR_CTRL_BUSY BIT(31)
+#define A5PSW_LK_ADDR_CTRL_DELETE_PORT BIT(30)
+#define A5PSW_LK_ADDR_CTRL_CLEAR BIT(29)
+#define A5PSW_LK_ADDR_CTRL_LOOKUP BIT(28)
+#define A5PSW_LK_ADDR_CTRL_WAIT BIT(27)
+#define A5PSW_LK_ADDR_CTRL_READ BIT(26)
+#define A5PSW_LK_ADDR_CTRL_WRITE BIT(25)
+#define A5PSW_LK_ADDR_CTRL_ADDRESS GENMASK(12, 0)
+
+#define A5PSW_LK_DATA_LO 0x40C
+#define A5PSW_LK_DATA_HI 0x410
+#define A5PSW_LK_DATA_HI_VALID BIT(16)
+#define A5PSW_LK_DATA_HI_PORT BIT(16)
+
+#define A5PSW_LK_LEARNCOUNT 0x418
+#define A5PSW_LK_LEARNCOUNT_COUNT GENMASK(13, 0)
+#define A5PSW_LK_LEARNCOUNT_MODE GENMASK(31, 30)
+#define A5PSW_LK_LEARNCOUNT_MODE_SET 0x0
+#define A5PSW_LK_LEARNCOUNT_MODE_INC 0x1
+#define A5PSW_LK_LEARNCOUNT_MODE_DEC 0x2
+
+#define A5PSW_MGMT_TAG_CFG 0x480
+#define A5PSW_MGMT_TAG_CFG_TAGFIELD GENMASK(31, 16)
+#define A5PSW_MGMT_TAG_CFG_ALL_FRAMES BIT(1)
+#define A5PSW_MGMT_TAG_CFG_ENABLE BIT(0)
+
+#define A5PSW_LK_AGETIME 0x41C
+#define A5PSW_LK_AGETIME_MASK GENMASK(23, 0)
+
+#define A5PSW_MDIO_CFG_STATUS 0x700
+#define A5PSW_MDIO_CFG_STATUS_CLKDIV GENMASK(15, 7)
+#define A5PSW_MDIO_CFG_STATUS_READERR BIT(1)
+#define A5PSW_MDIO_CFG_STATUS_BUSY BIT(0)
+
+#define A5PSW_MDIO_COMMAND 0x704
+/* The READ bit is named TRAININIT in the datasheet and should be set when reading */
+#define A5PSW_MDIO_COMMAND_READ BIT(15)
+#define A5PSW_MDIO_COMMAND_PHY_ADDR GENMASK(9, 5)
+#define A5PSW_MDIO_COMMAND_REG_ADDR GENMASK(4, 0)
+
+#define A5PSW_MDIO_DATA 0x708
+#define A5PSW_MDIO_DATA_MASK GENMASK(15, 0)
+
+#define A5PSW_CMD_CFG(port) (0x808 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_CMD_CFG_CNTL_FRM_ENA BIT(23)
+#define A5PSW_CMD_CFG_SW_RESET BIT(13)
+#define A5PSW_CMD_CFG_TX_CRC_APPEND BIT(11)
+#define A5PSW_CMD_CFG_HD_ENA BIT(10)
+#define A5PSW_CMD_CFG_PAUSE_IGNORE BIT(8)
+#define A5PSW_CMD_CFG_CRC_FWD BIT(6)
+#define A5PSW_CMD_CFG_ETH_SPEED BIT(3)
+#define A5PSW_CMD_CFG_RX_ENA BIT(1)
+#define A5PSW_CMD_CFG_TX_ENA BIT(0)
+
+#define A5PSW_FRM_LENGTH(port) (0x814 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_FRM_LENGTH_MASK GENMASK(13, 0)
+
+#define A5PSW_STATUS(port) (0x840 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_STATS_HIWORD 0x900
+
+/* Stats */
+#define A5PSW_aFramesTransmittedOK 0x868
+#define A5PSW_aFramesReceivedOK 0x86C
+#define A5PSW_aFrameCheckSequenceErrors 0x870
+#define A5PSW_aAlignmentErrors 0x874
+#define A5PSW_aOctetsTransmittedOK 0x878
+#define A5PSW_aOctetsReceivedOK 0x87C
+#define A5PSW_aTxPAUSEMACCtrlFrames 0x880
+#define A5PSW_aRxPAUSEMACCtrlFrames 0x884
+/* If */
+#define A5PSW_ifInErrors 0x888
+#define A5PSW_ifOutErrors 0x88C
+#define A5PSW_ifInUcastPkts 0x890
+#define A5PSW_ifInMulticastPkts 0x894
+#define A5PSW_ifInBroadcastPkts 0x898
+#define A5PSW_ifOutDiscards 0x89C
+#define A5PSW_ifOutUcastPkts 0x8A0
+#define A5PSW_ifOutMulticastPkts 0x8A4
+#define A5PSW_ifOutBroadcastPkts 0x8A8
+/* Ether */
+#define A5PSW_etherStatsDropEvents 0x8AC
+#define A5PSW_etherStatsOctets 0x8B0
+#define A5PSW_etherStatsPkts 0x8B4
+#define A5PSW_etherStatsUndersizePkts 0x8B8
+#define A5PSW_etherStatsOversizePkts 0x8BC
+#define A5PSW_etherStatsPkts64Octets 0x8C0
+#define A5PSW_etherStatsPkts65to127Octets 0x8C4
+#define A5PSW_etherStatsPkts128to255Octets 0x8C8
+#define A5PSW_etherStatsPkts256to511Octets 0x8CC
+#define A5PSW_etherStatsPkts512to1023Octets 0x8D0
+#define A5PSW_etherStatsPkts1024to1518Octets 0x8D4
+#define A5PSW_etherStatsPkts1519toXOctets 0x8D8
+#define A5PSW_etherStatsJabbers 0x8DC
+#define A5PSW_etherStatsFragments 0x8E0
+
+#define A5PSW_VLANReceived 0x8E8
+#define A5PSW_VLANTransmitted 0x8EC
+
+#define A5PSW_aDeferred 0x910
+#define A5PSW_aMultipleCollisions 0x914
+#define A5PSW_aSingleCollisions 0x918
+#define A5PSW_aLateCollisions 0x91C
+#define A5PSW_aExcessiveCollisions 0x920
+#define A5PSW_aCarrierSenseErrors 0x924
+
+#define A5PSW_VLAN_TAG(prio, id) (((prio) << 12) | (id))
+#define A5PSW_PORTS_NUM 5
+#define A5PSW_CPU_PORT (A5PSW_PORTS_NUM - 1)
+#define A5PSW_MDIO_DEF_FREQ 2500000
+#define A5PSW_MDIO_TIMEOUT 100
+#define A5PSW_JUMBO_LEN (10 * SZ_1K)
+#define A5PSW_MDIO_CLK_DIV_MIN 5
+#define A5PSW_TAG_LEN 8
+#define A5PSW_VLAN_COUNT 32
+
+/* Ensure enough space for 2 VLAN tags */
+#define A5PSW_EXTRA_MTU_LEN (A5PSW_TAG_LEN + 8)
+#define A5PSW_MAX_MTU (A5PSW_JUMBO_LEN - A5PSW_EXTRA_MTU_LEN)
+
+#define A5PSW_PATTERN_MGMTFWD 0
+
+#define A5PSW_LK_BUSY_USEC_POLL 10
+#define A5PSW_CTRL_TIMEOUT 1000
+#define A5PSW_TABLE_ENTRIES 8192
+
+struct fdb_entry {
+ u8 mac[ETH_ALEN];
+ u16 valid:1;
+ u16 is_static:1;
+ u16 prio:3;
+ u16 port_mask:5;
+ u16 reserved:6;
+} __packed;
+
+union lk_data {
+ struct {
+ u32 lo;
+ u32 hi;
+ };
+ struct fdb_entry entry;
+};
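
On the little-endian RZ/N1, the packed fdb_entry overlays the two 32-bit lookup data registers as sketched below (inferred from the field widths, and consistent with A5PSW_LK_DATA_HI_VALID above being BIT(16)):

	/*
	 * lo = mac[0] | mac[1] << 8 | mac[2] << 16 | mac[3] << 24
	 * hi = mac[4] | mac[5] << 8 | valid << 16 | is_static << 17 |
	 *      prio << 18 | port_mask << 21
	 *
	 * This layout is why the FDB helpers only need to re-read and
	 * rewrite A5PSW_LK_DATA_HI to update the flags and port mask.
	 */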
+
+/**
+ * struct a5psw - switch struct
+ * @base: Base address of the switch
+ * @hclk: hclk_switch clock
+ * @clk: clk_switch clock
+ * @dev: Device associated to the switch
+ * @mii_bus: MDIO bus struct
+ * @pcs: Array of PCS connected to the switch ports (not for the CPU)
+ * @ds: DSA switch struct
+ * @lk_lock: Lock for the lookup table
+ * @reg_lock: Lock for register read-modify-write operation
+ * @bridged_ports: Mask of ports that are bridged and should be flooded
+ * @br_dev: Bridge net device
+ */
+struct a5psw {
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+ struct phylink_pcs *pcs[A5PSW_PORTS_NUM - 1];
+ struct dsa_switch ds;
+ struct mutex lk_lock;
+ spinlock_t reg_lock;
+ u32 bridged_ports;
+ struct net_device *br_dev;
+};
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 955abbc5490e..9a55c1d5a0a1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -84,6 +84,7 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9eb01169957f..c06e75ed4231 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index fbf4588994ac..d19d1579c415 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -1106,7 +1106,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0, &rxmac->mif_ctrl);
writel(0, &rxmac->space_avail);
- /* Initialize the the mif_ctrl register
+ /* Initialize the mif_ctrl register
* bit 3: Receive code error. One or more nibbles were signaled as
* errors during the reception of the packet. Clear this
* bit in Gigabit, set it in 100Mbit. This was derived
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4d46780fad13..f342bb853189 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1673,12 +1673,10 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
return ret;
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
- packet->header_len = skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
+ packet->header_len = skb_inner_tcp_all_headers(skb);
packet->tcp_header_len = inner_tcp_hdrlen(skb);
} else {
- packet->header_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ packet->header_len = skb_tcp_all_headers(skb);
packet->tcp_header_len = tcp_hdrlen(skb);
}
packet->tcp_payload_len = skb->len - packet->header_len;
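
The skb_tcp_all_headers() conversions in this and the following hunks are mechanical; the helpers, added to <linux/tcp.h> alongside these driver changes, compute in effect:

static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	/* MAC + network headers plus the TCP header, including options */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	/* same, but against the inner headers of an encapsulated packet */
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}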
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d9547552ceef..b875c430222e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -417,7 +417,7 @@ struct xgbe_rx_ring_data {
/* Structure used to hold information related to the descriptor
* and the packet associated with the descriptor (always use
- * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
index b6119dcc3bb9..c2fda80fe1cc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -158,7 +158,7 @@ struct aq_mss_egress_class_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted field will assume the
* SNAP header exists as encapsulated in 802.3 (RFC 1042), i.e. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! 0: don't care and no LLC header exist.
@@ -422,7 +422,7 @@ struct aq_mss_ingress_preclass_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted field will assume the
* SNAP header exists as encapsulated in 802.3 (RFC 1042), i.e. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! Mask is per-byte.
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 1c6ea6766aa1..e461f4764066 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -786,7 +786,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
struct ag71xx_ring *ring = &ag->tx_ring;
int sent = 0, bytes_compl = 0, n = 0;
@@ -825,7 +825,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
if (!skb)
continue;
- dev_kfree_skb_any(skb);
+ napi_consume_skb(skb, budget);
ring->buf[i].tx.skb = NULL;
bytes_compl += ring->buf[i].tx.len;
@@ -970,7 +970,7 @@ static void ag71xx_fast_reset(struct ag71xx *ag)
mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
- ag71xx_tx_packets(ag, true);
+ ag71xx_tx_packets(ag, true, 0);
reset_control_assert(ag->mac_reset);
usleep_range(10, 20);
@@ -1657,7 +1657,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pktlen;
- skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
@@ -1703,7 +1703,7 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
int tx_done, rx_done;
u32 status;
- tx_done = ag71xx_tx_packets(ag, false);
+ tx_done = ag71xx_tx_packets(ag, false, limit);
netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
rx_done = ag71xx_rx_packets(ag, limit);
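
Passing the NAPI budget down to ag71xx_tx_packets() relies on the napi_consume_skb() contract: with budget == 0 (as in ag71xx_fast_reset(), which runs outside NAPI context) it falls back to dev_consume_skb_any(), while a non-zero budget lets completed skbs be batched on the per-CPU NAPI free cache. Likewise, napi_build_skb() is the NAPI-context variant of build_skb() that can take the struct sk_buff itself from that per-CPU cache.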
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 49459397993e..948584761e66 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2072,7 +2072,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -2107,7 +2107,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2132,7 +2132,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2219,7 +2219,8 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2849,7 +2850,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 20681860a599..57a51fb7746c 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1609,8 +1609,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
- proto_hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb)) {
tpd_req += ((skb_headlen(skb) - proto_hdr_len +
MAX_TX_BUF_LEN - 1) >>
@@ -1645,7 +1644,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
netdev_warn(adapter->netdev,
@@ -1713,7 +1712,8 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (segment) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
@@ -2482,7 +2482,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 6a969969d221..ff1fe09abf9f 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2115,7 +2115,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
ntohs(iph->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
tcp_hdr(skb)->check =
@@ -2206,7 +2206,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (retval) {
/* TSO */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
@@ -2367,8 +2367,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
- proto_hdr_len = (skb_transport_offset(skb) +
- tcp_hdrlen(skb));
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (unlikely(proto_hdr_len > len)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 514d61dd91c7..fcaa6a2d067f 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1935,7 +1935,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enet_driver = {
+static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
.remove = bcm_enet_remove,
.driver = {
@@ -2756,7 +2756,7 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enetsw_driver = {
+static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
.remove = bcm_enetsw_remove,
.driver = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5729a5ab059d..712b5595bc39 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3421,12 +3421,9 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Headers length */
if (xmit_type & XMIT_GSO_ENC)
- hlen = (int)(skb_inner_transport_header(skb) -
- skb->data) +
- inner_tcp_hdrlen(skb);
+ hlen = skb_inner_tcp_all_headers(skb);
else
- hlen = (int)(skb_transport_header(skb) -
- skb->data) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
@@ -3534,15 +3531,13 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data;
+ return skb_inner_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_inner_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
}
/**
@@ -3568,12 +3563,12 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
}
/* set FW indication according to inner or outer protocols if tunneled */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 56b46b8206a7..c74b2e4250ad 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -535,12 +535,9 @@ normal_tx:
u32 hdr_len;
if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
else
- hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -4480,7 +4477,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
}
}
if (irq_reinit) {
- kfree(bp->ntp_fltr_bmap);
+ bitmap_free(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
}
bp->ntp_fltr_count = 0;
@@ -4499,9 +4496,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
- sizeof(long),
- GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -10658,7 +10653,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
- /* Flush rings and and disable interrupts */
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f7f10cfb3476..e86503d97f32 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
+ id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
@@ -669,7 +669,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
- kfree(id_tbl->table);
+ bitmap_free(id_tbl->table);
id_tbl->table = NULL;
}
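
These bitmap_zalloc()/bitmap_free() conversions (here and in the cxgb4 hunks below) are behaviour-preserving; in lib/bitmap.c the helpers amount to:

unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
	/* the same allocation the open-coded kcalloc() performed */
	return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
			     flags | __GFP_ZERO);
}

void bitmap_free(const unsigned long *bitmap)
{
	kfree(bitmap);
}

The zeroing allocation is also why the cxgb4 hunk below can drop its explicit bitmap_zero() call.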
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c28f8cc00d1c..db1e9d810b41 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7944,7 +7944,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f6fe08df568b..29dd0f93d6c0 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2823,8 +2823,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
return -EINVAL;
}
- if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
@@ -2872,8 +2871,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
- tcp_hdrlen(skb))) {
+ skb_tcp_all_headers(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d0ea8dbfa213..d5f7a9fb36d5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2267,7 +2267,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
@@ -4600,6 +4600,40 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static int init_reset_optional(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+
+ if (IS_ERR(bp->sgmii_phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ "failed to get SGMII PHY\n");
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init SGMII PHY\n");
+ }
+
+ /* Fully reset controller at hardware level if mapped in device tree */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ phy_exit(bp->sgmii_phy);
+ return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ }
+
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
+}
+
static const struct macb_usrio_config sama7g5_usrio = {
.mii = 0,
.rmii = 1,
@@ -4626,8 +4660,8 @@ static const struct macb_config at91sam9260_config = {
};
static const struct macb_config sama5d3macb_config = {
- .caps = MACB_CAPS_SG_DISABLED
- | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_SG_DISABLED |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &macb_default_usrio,
@@ -4658,8 +4692,8 @@ static const struct macb_config sama5d29_config = {
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4689,55 +4723,13 @@ static const struct macb_config np4_config = {
.usrio = &macb_default_usrio,
};
-static int zynqmp_init(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct macb *bp = netdev_priv(dev);
- int ret;
-
- if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- /* Ensure PS-GTR PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
-
- if (IS_ERR(bp->sgmii_phy)) {
- ret = PTR_ERR(bp->sgmii_phy);
- dev_err_probe(&pdev->dev, ret,
- "failed to get PS-GTR PHY\n");
- return ret;
- }
-
- ret = phy_init(bp->sgmii_phy);
- if (ret) {
- dev_err(&pdev->dev, "failed to init PS-GTR PHY: %d\n",
- ret);
- return ret;
- }
- }
-
- /* Fully reset GEM controller at hardware level using zynqmp-reset driver,
- * if mapped in device tree.
- */
- ret = device_reset_optional(&pdev->dev);
- if (ret) {
- dev_err_probe(&pdev->dev, ret, "failed to reset controller");
- phy_exit(bp->sgmii_phy);
- return ret;
- }
-
- ret = macb_init(pdev);
- if (ret)
- phy_exit(bp->sgmii_phy);
-
- return ret;
-}
-
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = zynqmp_init,
+ .init = init_reset_optional,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4751,6 +4743,17 @@ static const struct macb_config zynq_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config mpfs_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
MACB_CAPS_MIIONRGMII,
@@ -4787,6 +4790,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
{ /* sentinel */ }
@@ -4796,8 +4800,8 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static const struct macb_config default_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 4367edbdd579..06397cc8bb36 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1261,7 +1261,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
@@ -1382,7 +1382,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
- hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_start = skb_tcp_all_headers(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4a872f328fea..7d5204834ee2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -85,7 +85,7 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
if (err) {
dev_err(adap->pdev_dev,
- "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+ "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
dcb_ver_array[dcb->dcb_version], app.selector,
app.protocol, -err);
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 7d49fd4edc9e..14e0d989c3ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3429,18 +3429,18 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
unsigned long *t;
struct adapter *adap = filp->private_data;
- t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ t = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!t)
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
- kfree(t);
+ bitmap_free(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- kfree(t);
+ bitmap_free(t);
return count;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 6c790af92170..77897edd2bc0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -2227,7 +2227,7 @@ void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
if (eth_filter_info) {
for (i = 0; i < adap->params.nports; i++) {
kvfree(eth_filter_info[i].loc_array);
- kfree(eth_filter_info[i].bmap);
+ bitmap_free(eth_filter_info[i].bmap);
}
kfree(eth_filter_info);
}
@@ -2270,9 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
goto free_eth_finfo;
}
- eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
- sizeof(unsigned long),
- GFP_KERNEL);
+ eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
goto free_eth_finfo;
@@ -2284,7 +2282,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
free_eth_finfo:
while (i-- > 0) {
- kfree(eth_filter->port[i].bmap);
+ bitmap_free(eth_filter->port[i].bmap);
kvfree(eth_filter->port[i].loc_array);
}
kfree(eth_filter_info);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0c78c0db8937..d0061921529f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5047,28 +5047,24 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* Allocate the memory for the various egress queue bitmaps
* i.e. starving_fl, txq_maperr and blocked_fl.
*/
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
#ifdef CONFIG_DEBUG_FS
- adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.blocked_fl) {
ret = -ENOMEM;
goto bye;
}
- bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -5417,10 +5413,10 @@ bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
+ bitmap_free(adap->sge.starving_fl);
+ bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adap->sge.blocked_fl);
+ bitmap_free(adap->sge.blocked_fl);
#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
@@ -5854,8 +5850,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
- sizeof(long), GFP_KERNEL);
+ adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
@@ -5870,7 +5865,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
static void free_msix_info(struct adapter *adap)
{
- kfree(adap->msix_bmap.msix_bmap);
+ bitmap_free(adap->msix_bmap.msix_bmap);
kfree(adap->msix_info);
}
@@ -6189,10 +6184,10 @@ static void free_some_resources(struct adapter *adapter)
cxgb4_cleanup_ethtool_filters(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
+ bitmap_free(adapter->sge.starving_fl);
+ bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adapter->sge.blocked_fl);
+ bitmap_free(adapter->sge.blocked_fl);
#endif
disable_msi(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f889f404305c..ee52e3b1d74f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1531,7 +1531,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (cxgb4_is_ktls_skb(skb) &&
- (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ (skb->len - skb_tcp_all_headers(skb)))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
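For reference, the skb_tcp_all_headers() conversions here and in the drivers below are purely mechanical: the helpers are thin inline wrappers in include/linux/tcp.h, as defined when this series was merged:

static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}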
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 7de3800437c9..c2822e635f89 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2859,7 +2859,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* address stored on the adapter
* @adapter: The adapter
*
- * Find the the port mask for the VF based on the index of mac
+ * Find the port mask for the VF based on the index of mac
* address stored in the adapter. If no mac address is stored on
* the adapter for the VF, use the port mask received from the
* firmware.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d546993bda09..1c52592d3b65 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -877,7 +877,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
- * separately with the Padding Boundary in SGE_CONTROL and and Packing
+ * separately with the Padding Boundary in SGE_CONTROL and Packing
* Boundary in SGE_CONTROL2. So for T5 and later we need to grab
* SGE_CONTROL in order to determine how ingress packet data will be
* laid out in Packed Buffer Mode. Unfortunately, older versions of
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 60b648b46f75..bfee0e4e54b1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1012,7 +1012,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pktlen = skb_tcp_all_headers(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1907,7 +1907,7 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
return 0;
th = tcp_hdr(nskb);
- skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
skb_tx_timestamp(nskb);
@@ -1938,7 +1938,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
tcp_seq = ntohl(th->seq);
- skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_offset = skb_tcp_all_headers(skb);
skb_data_len = skb->len - skb_offset;
data_len = skb_data_len;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 1c81b161de52..372fb7b3a282 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -680,11 +680,10 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
skb_frag_t *frag;
if (skb->encapsulation) {
- hdr_len = skb_inner_transport_header(skb) - skb->data;
- hdr_len += inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
} else {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cd4e243da5fa..414362febbb9 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -737,9 +737,9 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static int be_gso_hdr_len(struct sk_buff *skb)
{
if (skb->encapsulation)
- return skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -3178,7 +3178,7 @@ static irqreturn_t be_intx(int irq, void *dev)
}
be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
- /* Return IRQ_HANDLED only for the the first spurious intr
+ /* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
* this irq as a bad one!
*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a90275143d87..e8e2aa1e7f01 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -691,7 +691,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 5ff2634bee2f..cb419aef8d1b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -201,7 +201,7 @@ void fs_enet_platform_cleanup(void);
/* access macros */
#if defined(CONFIG_CPM1)
-/* for a a CPM1 __raw_xxx's are sufficient */
+/* for a CPM1 __raw_xxx's are sufficient */
#define __cbd_out32(addr, x) __raw_writel(x, addr)
#define __cbd_out16(addr, x) __raw_writew(x, addr)
#define __cbd_in32(addr) __raw_readl(addr)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3dc9369a33f7..e7bf1524b68e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1944,6 +1944,7 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
+ skb_tx_timestamp(skb);
netdev_tx_sent_queue(txq, bytes_sent);
gfar_wmb();
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 9a2c16d69e2c..81fb68730138 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1457,6 +1457,7 @@ static int gfar_get_ts_info(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
return 0;
}
@@ -1474,7 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
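The two gianfar hunks belong together: once the xmit path calls skb_tx_timestamp(), .get_ts_info should advertise software timestamping so userspace knows to request it. A minimal sketch of that contract for a driver with no PHC; example_get_ts_info() is an illustrative name:

static int example_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	/* software-only: the xmit path calls skb_tx_timestamp() */
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;	/* no PTP hardware clock in this sketch */
	return 0;
}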
diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h
index 257203e94b68..f21819670106 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_hci.h
+++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h
@@ -442,6 +442,7 @@ enum fun_port_lane_attr {
};
enum fun_admin_port_subop {
+ FUN_ADMIN_PORT_SUBOP_XCVR_READ = 0x23,
FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24,
};
@@ -595,6 +596,19 @@ struct fun_admin_port_req {
struct fun_admin_read48_req read48[];
} read;
+ struct fun_admin_port_xcvr_read_req {
+ u8 subop;
+ u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+ } xcvr_read;
struct fun_admin_port_inetaddr_event_req {
__u8 subop;
__u8 rsvd0;
@@ -625,6 +639,15 @@ struct fun_admin_port_req {
.id = cpu_to_be32(_id), \
}
+#define FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(_flags, _id, _bank, _page, \
+ _offset, _length, _dev_addr) \
+ ((struct fun_admin_port_xcvr_read_req) { \
+ .subop = FUN_ADMIN_PORT_SUBOP_XCVR_READ, \
+ .flags = cpu_to_be16(_flags), .id = cpu_to_be32(_id), \
+ .bank = (_bank), .page = (_page), .offset = (_offset), \
+ .length = (_length), .dev_addr = (_dev_addr), \
+ })
+
struct fun_admin_port_rsp {
struct fun_admin_rsp_common common;
@@ -659,6 +682,23 @@ struct fun_admin_port_rsp {
} u;
};
+struct fun_admin_port_xcvr_read_rsp {
+ struct fun_admin_rsp_common common;
+
+ u8 subop;
+ u8 rsvd0[3];
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+
+ u8 data[128];
+};
+
enum fun_xcvr_type {
FUN_XCVR_BASET = 0x0,
FUN_XCVR_CU = 0x1,
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index d081168c95fa..31aa185f4d17 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -78,6 +78,7 @@ static const char * const txq_stat_names[] = {
"tx_cso",
"tx_tso",
"tx_encapsulated_tso",
+ "tx_uso",
"tx_more",
"tx_queue_stops",
"tx_queue_restarts",
@@ -778,6 +779,7 @@ static void fun_get_ethtool_stats(struct net_device *netdev,
ADD_STAT(txs.tx_cso);
ADD_STAT(txs.tx_tso);
ADD_STAT(txs.tx_encap_tso);
+ ADD_STAT(txs.tx_uso);
ADD_STAT(txs.tx_more);
ADD_STAT(txs.tx_nstops);
ADD_STAT(txs.tx_nrestarts);
@@ -1116,6 +1118,39 @@ static int fun_set_fecparam(struct net_device *netdev,
return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
}
+static int fun_get_port_module_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *req,
+ struct netlink_ext_ack *extack)
+{
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_xcvr_read_rsp rsp;
+ } cmd;
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified port is virtual, only physical ports have modules");
+ return -EOPNOTSUPP;
+ }
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.xcvr_read =
+ FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port,
+ req->bank, req->page,
+ req->offset, req->length,
+ req->i2c_address);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ memcpy(req->data, cmd.rsp.data, req->length);
+ return req->length;
+}
+
static const struct ethtool_ops fun_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1154,6 +1189,7 @@ static const struct ethtool_ops fun_ethtool_ops = {
.get_eth_mac_stats = fun_get_802_3_stats,
.get_eth_ctrl_stats = fun_get_802_3_ctrl_stats,
.get_rmon_stats = fun_get_rmon_stats,
+ .get_module_eeprom_by_page = fun_get_port_module_page,
};
void fun_set_ethtool_ops(struct net_device *netdev)
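The .get_module_eeprom_by_page op receives one page-addressed EEPROM window in struct ethtool_module_eeprom (bank, page, offset, length, i2c_address) and returns the number of bytes read, or a negative errno. A minimal sketch of the contract; example_read_xcvr() is a placeholder for the device's bus access, not a real function:

static int example_get_module_eeprom_by_page(struct net_device *netdev,
					     const struct ethtool_module_eeprom *req,
					     struct netlink_ext_ack *extack)
{
	int err = example_read_xcvr(netdev, req->bank, req->page,
				    req->offset, req->length,
				    req->i2c_address, req->data);

	return err ? err : req->length;	/* bytes read on success */
}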
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index 9485cf699c5d..f247b7ad3a88 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1357,7 +1357,8 @@ static const struct net_device_ops fun_netdev_ops = {
#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+ NETIF_F_GSO_UDP_L4)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
index ff6e29237253..a97e3af00cb9 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_tx.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -83,7 +83,7 @@ static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
const struct fun_ktls_tx_ctx *tls_ctx;
u32 datalen, seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return skb;
@@ -130,6 +130,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
struct fun_dataop_gl *gle;
const struct tcphdr *th;
unsigned int ngle, i;
+ unsigned int l4_hlen;
u16 flags;
if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
@@ -178,6 +179,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
FUN_ETH_UPDATE_INNER_L3_LEN;
}
th = inner_tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
fun_eth_offload_init(&req->offload, flags,
shinfo->gso_size,
tcp_hdr_doff_flags(th), 0,
@@ -185,6 +187,24 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
skb_inner_transport_offset(skb),
skb_network_offset(skb), ol4_ofst);
FUN_QSTAT_INC(q, tx_encap_tso);
+ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+ flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L4_LEN |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_INNER_IPV6;
+
+ l4_hlen = sizeof(struct udphdr);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ cpu_to_be16(l4_hlen << 10), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_uso);
} else {
/* HW considers one set of headers as inner */
flags = FUN_ETH_INNER_LSO |
@@ -195,6 +215,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
else
flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
th = tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
fun_eth_offload_init(&req->offload, flags,
shinfo->gso_size,
tcp_hdr_doff_flags(th), 0,
@@ -209,7 +230,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
extra_pkts = shinfo->gso_segs - 1;
extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
- __tcp_hdrlen(th)) * extra_pkts;
+ l4_hlen) * extra_pkts;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
if (skb->csum_offset == offsetof(struct udphdr, check))
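The new l4_hlen variable above exists because the per-segment header size is no longer always a TCP header: TSO replicates a variable-length TCP header (taken from the data-offset field), while UDP segmentation offload replicates a fixed 8-byte UDP header. A sketch of the byte accounting, with illustrative names:

static unsigned int gso_extra_bytes(const struct sk_buff *skb,
				    unsigned int l2_l3_hlen,
				    unsigned int l4_hlen)
{
	/* TSO: l4_hlen = __tcp_hdrlen(th), 20..60 bytes
	 * USO: l4_hlen = sizeof(struct udphdr), always 8 bytes
	 * Every segment after the first repeats the full header stack.
	 */
	return (l2_l3_hlen + l4_hlen) * (skb_shinfo(skb)->gso_segs - 1);
}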
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 04c9f91b7489..1711f82cad71 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -82,6 +82,7 @@ struct funeth_txq_stats { /* per Tx queue SW counters */
u64 tx_cso; /* # of packets with checksum offload */
u64 tx_tso; /* # of non-encapsulated TSO super-packets */
u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
+ u64 tx_uso; /* # of non-encapsulated UDP LSO super-packets */
u64 tx_more; /* # of DBs elided due to xmit_more */
u64 tx_nstops; /* # of times the queue has stopped */
u64 tx_nrestarts; /* # of times the queue has restarted */
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index ec394d991668..588d64819ed5 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -386,7 +386,7 @@ static int gve_prep_tso(struct sk_buff *skb)
(__force __wsum)htonl(paylen));
/* Compute length of segmentation header. */
- header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ header_len = skb_tcp_all_headers(skb);
break;
default:
return -EINVAL;
@@ -598,9 +598,9 @@ static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
*/
static bool gve_can_send_tso(const struct sk_buff *skb)
{
- const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
int cur_seg_size;
@@ -795,7 +795,7 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
/* No outstanding miss completion but packet allocated
* implies packet receives a re-injection completion
- * without a a prior miss completion. Return without
+ * without a prior miss completion. Return without
* completing the packet.
*/
net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 2f0bd21a9082..d94cc8c6681f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -31,8 +31,6 @@
#define HNS_BUFFER_SIZE_2048 2048
#define BD_MAX_SEND_SIZE 8191
-#define SKB_TMP_LEN(SKB) \
- (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int send_sz, dma_addr_t dma, int frag_end,
@@ -94,7 +92,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -108,7 +106,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
}
desc->tx.ip_offset = ip_offset;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ae56306400b8..35d70041b9e8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1838,9 +1838,9 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
if (!skb->encapsulation)
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_tcp_all_headers(skb);
- return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
}
/* HW need every continuous max_non_tso_bd_num buffer data to be larger
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index 5153e5d41bbd..b8a1ecb4b8fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -37,8 +37,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
__entry->hdr_len = skb->encapsulation ?
- skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
- skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
__entry->ip_summed = skb->ip_summed;
__entry->fraglist = skb_has_frag_list(skb);
hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5eaf09ea4009..26f87330173e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -979,7 +979,7 @@ static int hclgevf_update_mac_list(struct hnae3_handle *handle,
/* if the mac addr is already in the mac list, no need to add a new
* one into it, just check the mac addr state, convert it to a new
- * new state, or just remove it, or do nothing.
+ * state, or just remove it, or do nothing.
*/
mac_node = hclgevf_find_mac_node(list, addr);
if (mac_node) {
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 07fdab58001d..c2ae1b4f9a5f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -174,7 +174,7 @@ static int hns_mdio_wait_ready(struct mii_bus *bus)
u32 cmd_reg_value;
int i;
- /* waitting for MDIO_COMMAND_REG 's mdio_start==0 */
+ /* waiting for MDIO_COMMAND_REG's mdio_start==0 */
/* after that can do read or write*/
for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
@@ -319,7 +319,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
MDIO_C45_READ, phy_id, devad);
}
- /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
+ /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
/* check for read or write opt is finished */
ret = hns_mdio_wait_ready(bus);
if (ret) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index fb3e89141a0d..a4fbf44f944c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -95,9 +95,6 @@ struct hinic_dev {
u16 sq_depth;
u16 rq_depth;
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-
u8 rss_tmpl_idx;
u8 rss_hash_engine;
u16 num_rss;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 05329292d940..c23ee2ddbce3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -62,8 +62,6 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_LRO_RX_TIMER_DEFAULT 16
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
-
#define work_to_rx_mode_work(work) \
container_of(work, struct hinic_rx_mode_work, work)
@@ -82,56 +80,44 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t pre_features,
netdev_features_t features, bool force_change);
-static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
+static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
{
- struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
struct hinic_rxq_stats rx_stats;
- u64_stats_init(&rx_stats.syncp);
-
hinic_rxq_get_stats(rxq, &rx_stats);
- u64_stats_update_begin(&nic_rx_stats->syncp);
nic_rx_stats->bytes += rx_stats.bytes;
nic_rx_stats->pkts += rx_stats.pkts;
nic_rx_stats->errors += rx_stats.errors;
nic_rx_stats->csum_errors += rx_stats.csum_errors;
nic_rx_stats->other_errors += rx_stats.other_errors;
- u64_stats_update_end(&nic_rx_stats->syncp);
-
- hinic_rxq_clean_stats(rxq);
}
-static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
+static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
{
- struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
struct hinic_txq_stats tx_stats;
- u64_stats_init(&tx_stats.syncp);
-
hinic_txq_get_stats(txq, &tx_stats);
- u64_stats_update_begin(&nic_tx_stats->syncp);
nic_tx_stats->bytes += tx_stats.bytes;
nic_tx_stats->pkts += tx_stats.pkts;
nic_tx_stats->tx_busy += tx_stats.tx_busy;
nic_tx_stats->tx_wake += tx_stats.tx_wake;
nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
- u64_stats_update_end(&nic_tx_stats->syncp);
-
- hinic_txq_clean_stats(txq);
}
-static void update_nic_stats(struct hinic_dev *nic_dev)
+static void gather_nic_stats(struct hinic_dev *nic_dev,
+ struct hinic_rxq_stats *nic_rx_stats,
+ struct hinic_txq_stats *nic_tx_stats)
{
int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
for (i = 0; i < num_qps; i++)
- update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
+ gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
for (i = 0; i < num_qps; i++)
- update_tx_stats(nic_dev, &nic_dev->txqs[i]);
+ gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
}
/**
@@ -560,8 +546,6 @@ int hinic_close(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- update_nic_stats(nic_dev);
-
up(&nic_dev->mgmt_lock);
if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
@@ -855,26 +839,19 @@ static void hinic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats *nic_rx_stats;
- struct hinic_txq_stats *nic_tx_stats;
-
- nic_rx_stats = &nic_dev->rx_stats;
- nic_tx_stats = &nic_dev->tx_stats;
-
- down(&nic_dev->mgmt_lock);
+ struct hinic_rxq_stats nic_rx_stats = {};
+ struct hinic_txq_stats nic_tx_stats = {};
if (nic_dev->flags & HINIC_INTF_UP)
- update_nic_stats(nic_dev);
-
- up(&nic_dev->mgmt_lock);
+ gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
- stats->rx_bytes = nic_rx_stats->bytes;
- stats->rx_packets = nic_rx_stats->pkts;
- stats->rx_errors = nic_rx_stats->errors;
+ stats->rx_bytes = nic_rx_stats.bytes;
+ stats->rx_packets = nic_rx_stats.pkts;
+ stats->rx_errors = nic_rx_stats.errors;
- stats->tx_bytes = nic_tx_stats->bytes;
- stats->tx_packets = nic_tx_stats->pkts;
- stats->tx_errors = nic_tx_stats->tx_dropped;
+ stats->tx_bytes = nic_tx_stats.bytes;
+ stats->tx_packets = nic_tx_stats.pkts;
+ stats->tx_errors = nic_tx_stats.tx_dropped;
}
static int hinic_set_features(struct net_device *netdev,
@@ -1173,8 +1150,6 @@ static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
static int nic_dev_init(struct pci_dev *pdev)
{
struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_txq_stats *tx_stats;
- struct hinic_rxq_stats *rx_stats;
struct hinic_dev *nic_dev;
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -1236,15 +1211,8 @@ static int nic_dev_init(struct pci_dev *pdev)
sema_init(&nic_dev->mgmt_lock, 1);
- tx_stats = &nic_dev->tx_stats;
- rx_stats = &nic_dev->rx_stats;
-
- u64_stats_init(&tx_stats->syncp);
- u64_stats_init(&rx_stats->syncp);
-
- nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
- VLAN_BITMAP_SIZE(nic_dev),
- GFP_KERNEL);
+ nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
err = -ENOMEM;
goto err_vlan_bitmap;
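devm_bitmap_zalloc() is the device-managed flavor of the helper used in the cxgb4 hunks earlier: the bitmap's lifetime is tied to the struct device, so no teardown path needs an explicit bitmap_free(). A minimal sketch (priv and pdev are illustrative):

priv->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID, GFP_KERNEL);
if (!priv->vlan_bitmap)
	return -ENOMEM;	/* devres unwinds earlier managed allocations */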
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 24b7b819dbfb..a866bea65110 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -73,7 +73,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
@@ -83,7 +82,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 87408e7bb809..5051cdff2384 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -98,7 +98,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
@@ -108,7 +107,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
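These hinic hunks remove a misuse of the u64_stats API: the seqcount in syncp belongs to the writer (the queue updating its counters), so a stack-local snapshot needs no u64_stats_update_begin()/end() of its own. The reader-side pattern that remains, sketched with an illustrative stats type:

static void snapshot_stats(const struct example_q_stats *qs,
			   struct example_q_stats *snap)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&qs->syncp);
		snap->pkts  = qs->pkts;
		snap->bytes = qs->bytes;
	} while (u64_stats_fetch_retry(&qs->syncp, start));
}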
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 8ce3348edf08..5dc302880f5f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1617,7 +1617,7 @@ static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
* For TSO packets we only copy the headers into the
* immediate area.
*/
- immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ immediate_len = skb_tcp_all_headers(skb);
}
if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7e7fe5bdf1f8..5ab7c0f81e9a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -5981,6 +5981,15 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
release_sub_crqs(adapter, 0);
rc = init_sub_crqs(adapter);
} else {
+ /* no need to reinitialize completely, but we do
+ * need to clean up transmits that were in flight
+ * when we processed the reset. Failure to do so
+ * will confound the upper layer, usually TCP, by
+ * creating the illusion of transmits that are
+ * awaiting completion.
+ */
+ clean_tx_pools(adapter);
+
rc = reset_sub_crq_queues(adapter);
}
} else {
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 36418b510dde..11a884aa5082 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1430,7 +1430,6 @@ static int e100_phy_check_without_mii(struct nic *nic)
#define MII_NSC_CONG MII_RESV1
#define NSC_CONG_ENABLE 0x0100
#define NSC_CONG_TXREADY 0x0400
-#define ADVERTISE_FC_SUPPORTED 0x0400
static int e100_phy_init(struct nic *nic)
{
struct net_device *netdev = nic->netdev;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index f8860f24ede0..4542e2bc28e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -2000,7 +2000,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f5feb55cfba..23299fc56199 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2708,7 +2708,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -3139,7 +3139,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
case e1000_82544: {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 4d4f5bf1e516..f4154ca7fcb4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -82,7 +82,6 @@ E1000_PARAM(Duplex, "Duplex setting");
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
-#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
@@ -95,7 +94,6 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 51512a73fdd0..5df7ad93f3d7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -957,7 +957,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fa06f68c8c80..38e60def8520 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5474,7 +5474,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -5846,7 +5846,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
/* we do this workaround for ES2LAN, but it is un-necessary,
* avoiding it could save a lot of cycles
*/
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ebe121db4307..3132d8f2f207 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -101,8 +101,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
* demoted to the most advanced interrupt mode available.
*/
E1000_PARAM(IntMode, "Interrupt Mode");
-#define MAX_INTMODE 2
-#define MIN_INTMODE 0
/* Enable Smart Power Down of the PHY
*
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index f2fba6e1d0f7..87fa5874f16e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -809,7 +809,7 @@ static s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function copies the message from the the message array to mbmem
+ * This function copies the message from the message array to mbmem
**/
static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index f6d56867f857..75cbdf2dbbe3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -78,7 +78,7 @@ static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
* @string: Pointer to location of destination string
*
* This function pulls the string back out of the attribute and will place
- * it in the array pointed by by string. It will return success if provided
+ * it in the array pointed by string. It will return success if provided
* with a valid pointers.
**/
static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
@@ -584,7 +584,7 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* @mbx: Unused mailbox pointer
*
* This function is a default handler for unrecognized messages. At a
- * a minimum it just indicates that the message requested was
+ * minimum it just indicates that the message requested was
* unimplemented.
**/
s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 57f4ec4f8d2f..97c574a33ba0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -37,6 +37,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
+#include <linux/bitfield.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include <linux/net/intel/i40e_client.h>
@@ -1093,6 +1094,21 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
(u32)(val & 0xFFFFFFFFULL));
}
+/**
+ * i40e_get_pf_count - get PCI PF count.
+ * @hw: pointer to a hw.
+ *
+ * Reports the function number of the highest PCI physical
+ * function plus 1 as it is loaded from the NVM.
+ *
+ * Return: PCI PF count.
+ **/
+static inline u32 i40e_get_pf_count(struct i40e_hw *hw)
+{
+ return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK,
+ rd32(hw, I40E_GLGEN_PCIFCNCNT));
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
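FIELD_GET() (hence the new <linux/bitfield.h> include) derives the shift from a constant mask at compile time, so i40e_get_pf_count() needs no explicit use of the _SHIFT constant. The equivalent open-coded read, for comparison:

u32 pf_cnt = (rd32(hw, I40E_GLGEN_PCIFCNCNT) &
	      I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK) >>
	     I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT;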
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 552aad6ae1ca..d9a934ca3eba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -236,8 +236,6 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
-#define I40E_QUEUE_STAT(_name, _stat) \
- I40E_STAT(struct i40e_ring, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -1143,6 +1141,71 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#define I40E_LBIT_SIZE 8
+/**
+ * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
+ * @speed: speed in decimal
+ * @ks: ethtool ksettings
+ *
+ * Return i40e_aq_link_speed based on speed
+ **/
+static enum i40e_aq_link_speed
+i40e_speed_to_link_speed(__u32 speed, const struct ethtool_link_ksettings *ks)
+{
+ enum i40e_aq_link_speed link_speed = I40E_LINK_SPEED_UNKNOWN;
+ bool speed_changed = false;
+ int i, j;
+
+ static const struct {
+ __u32 speed;
+ enum i40e_aq_link_speed link_speed;
+ __u8 bit[I40E_LBIT_SIZE];
+ } i40e_speed_lut[] = {
+#define I40E_LBIT(mode) ETHTOOL_LINK_MODE_ ## mode ##_Full_BIT
+ {SPEED_100, I40E_LINK_SPEED_100MB, {I40E_LBIT(100baseT)} },
+ {SPEED_1000, I40E_LINK_SPEED_1GB,
+ {I40E_LBIT(1000baseT), I40E_LBIT(1000baseX),
+ I40E_LBIT(1000baseKX)} },
+ {SPEED_10000, I40E_LINK_SPEED_10GB,
+ {I40E_LBIT(10000baseT), I40E_LBIT(10000baseKR),
+ I40E_LBIT(10000baseLR), I40E_LBIT(10000baseCR),
+ I40E_LBIT(10000baseSR), I40E_LBIT(10000baseKX4)} },
+
+ {SPEED_25000, I40E_LINK_SPEED_25GB,
+ {I40E_LBIT(25000baseCR), I40E_LBIT(25000baseKR),
+ I40E_LBIT(25000baseSR)} },
+ {SPEED_40000, I40E_LINK_SPEED_40GB,
+ {I40E_LBIT(40000baseKR4), I40E_LBIT(40000baseCR4),
+ I40E_LBIT(40000baseSR4), I40E_LBIT(40000baseLR4)} },
+ {SPEED_20000, I40E_LINK_SPEED_20GB,
+ {I40E_LBIT(20000baseKR2)} },
+ {SPEED_2500, I40E_LINK_SPEED_2_5GB, {I40E_LBIT(2500baseT)} },
+ {SPEED_5000, I40E_LINK_SPEED_5GB, {I40E_LBIT(2500baseT)} }
+#undef I40E_LBIT
+};
+
+ for (i = 0; i < ARRAY_SIZE(i40e_speed_lut); i++) {
+ if (i40e_speed_lut[i].speed == speed) {
+ for (j = 0; j < I40E_LBIT_SIZE; j++) {
+ if (test_bit(i40e_speed_lut[i].bit[j],
+ ks->link_modes.supported)) {
+ speed_changed = true;
+ break;
+ }
+ if (!i40e_speed_lut[i].bit[j])
+ break;
+ }
+ if (speed_changed) {
+ link_speed = i40e_speed_lut[i].link_speed;
+ break;
+ }
+ }
+ }
+ return link_speed;
+}
+
+#undef I40E_LBIT_SIZE
+
/**
* i40e_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
@@ -1159,12 +1222,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings copy_ks;
struct i40e_aq_set_phy_config config;
struct i40e_pf *pf = np->vsi->back;
+ enum i40e_aq_link_speed link_speed;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
i40e_status status = 0;
int timeout = 50;
int err = 0;
+ __u32 speed;
u8 autoneg;
/* Changing port settings is not supported if this isn't the
@@ -1197,6 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
+ speed = copy_ks.base.speed;
/* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
@@ -1215,6 +1281,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* set autoneg back to what it currently is */
copy_ks.base.autoneg = safe_ks.base.autoneg;
+ copy_ks.base.speed = safe_ks.base.speed;
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
@@ -1331,6 +1398,27 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
40000baseLR4_Full))
config.link_speed |= I40E_LINK_SPEED_40GB;
+ /* Autonegotiation must be disabled to change speed */
+ if ((speed != SPEED_UNKNOWN && safe_ks.base.speed != speed) &&
+ (autoneg == AUTONEG_DISABLE ||
+ (safe_ks.base.autoneg == AUTONEG_DISABLE && !autoneg_changed))) {
+ link_speed = i40e_speed_to_link_speed(speed, ks);
+ if (link_speed == I40E_LINK_SPEED_UNKNOWN) {
+ netdev_info(netdev, "Given speed is not supported\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ } else {
+ config.link_speed = link_speed;
+ }
+ } else {
+ if (safe_ks.base.speed != speed) {
+ netdev_info(netdev,
+ "Unable to set speed, disable autoneg\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ }
+
/* If speed didn't get set, set it to what it currently is.
* This is needed because if advertise is 0 (as it is when autoneg
* is disabled) then speed won't get set.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 83e0cf475ebd..151e9b6b9df4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -551,6 +551,47 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
}
/**
+ * i40e_compute_pci_to_hw_id - compute index from PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+ int pf_count = i40e_get_pf_count(hw);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+ return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ new_data = rd64(hw, loreg);
+
+ if (!offset_loaded || new_data < *offset)
+ *offset = new_data;
+ *stat = new_data - *offset;
+}
+
+/**
* i40e_stat_update48 - read and update a 48 bit stat from the chip
* @hw: ptr to the hardware info
* @hireg: the high 32 bit reg to read
@@ -622,6 +663,34 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
}
/**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+ int stat_idx, bool offset_loaded,
+ struct i40e_eth_stats *stat_offset,
+ struct i40e_eth_stats *stat)
+{
+ u64 rx_rdpc, rx_rxerr;
+
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+ &stat_offset->rx_discards, &rx_rdpc);
+ i40e_stat_update64(hw,
+ I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+ I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+ offset_loaded, &stat_offset->rx_discards_other,
+ &rx_rxerr);
+
+ stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
* i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
* @vsi: the VSI to be updated
**/
@@ -680,6 +749,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
I40E_GLV_BPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
+
+ i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+ vsi->stat_offsets_loaded, oes, es);
+
vsi->stat_offsets_loaded = true;
}
@@ -4162,7 +4235,6 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
i40e_flush(&pf->hw);
if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4901,7 +4973,6 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
irq_update_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
@@ -13158,7 +13229,7 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
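i40e_stat_update64() above follows the driver's usual baseline pattern: device counters survive a PF reset, so the first value read becomes an offset and reported stats count from zero relative to it. The core of the pattern as a standalone sketch:

static u64 counter_since_load(u64 raw, bool loaded, u64 *baseline)
{
	if (!loaded || raw < *baseline)	/* first read, or counter wrapped */
		*baseline = raw;
	return raw - *baseline;
}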
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 61e5789d78db..57a71fa17ed5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -27,7 +27,6 @@
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
-#define to_dev(obj) container_of(obj, struct device, kobj)
enum i40e_ptp_pin {
SDP3_2 = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1908eed4fa5e..7339003aa17c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -211,6 +211,11 @@
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
@@ -643,6 +648,14 @@
#define I40E_VFQF_HKEY1_MAX_INDEX 12
#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX 143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0
+#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX 143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0
+#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b7967105a549..f6ba97a0166e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -372,7 +372,6 @@ static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
}
}
-#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
/**
@@ -1483,10 +1482,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
@@ -2291,16 +2288,14 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
* i40e_run_xdp - run an XDP program
* @rx_ring: Rx ring being processed
* @xdp: XDP buffer containing the frame
+ * @xdp_prog: XDP program to run
**/
-static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
if (!xdp_prog)
goto xdp_out;
@@ -2445,6 +2440,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int offset = rx_ring->rx_offset;
struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
struct xdp_buff xdp;
int xdp_res = 0;
@@ -2454,6 +2450,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
@@ -2514,7 +2512,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- xdp_res = i40e_run_xdp(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
}
if (xdp_res) {
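The i40e_run_xdp() refactor hoists the READ_ONCE() load of the XDP program out of the per-frame loop; a single snapshot per NAPI poll is sufficient and keeps every frame in one poll running the same program. The shape of the change, with illustrative example_* names:

static int example_poll(struct example_ring *ring, int budget)
{
	/* load once per poll instead of once per frame */
	struct bpf_prog *xdp_prog = READ_ONCE(ring->xdp_prog);
	int done = 0;

	while (done < budget && example_next_frame(ring)) {
		example_run_xdp(ring, xdp_prog);	/* hot loop stays load-free */
		done++;
	}
	return done;
}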
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 36a4ca1ffb1a..7b3f30beb757 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1172,6 +1172,7 @@ struct i40e_eth_stats {
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
+ u64 rx_discards_other; /* rxerr1 */
};
/* Statistics collected per VEB per TC */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index d01fb592778c..4f184c50f6e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2147,6 +2147,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
/* VFs only use TC 0 */
vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+ eth_zero_addr(vf->default_lan_addr.addr);
+ }
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index af3e7e6afc85..6d4009e0cbd6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -143,20 +143,17 @@ int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
* i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
*
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
-static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- /* NB! xdp_prog will always be !NULL, due to the fact that
- * this path is enabled by setting an XDP program.
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
@@ -339,9 +336,15 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
u16 cleaned_count;
+ /* NB! xdp_prog will always be !NULL, due to the fact that
+ * this path is enabled by setting an XDP program.
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
unsigned int rx_packets;
@@ -378,7 +381,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
- xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
&rx_bytes, size, xdp_res, &failure);
if (failure)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 541103909ef4..69ade653f5d4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4250,7 +4250,7 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index e2b4ba98f71e..0d22bba6a5f2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -5,10 +5,6 @@
#include "iavf_prototype.h"
#include "iavf_client.h"
-/* busy wait delay in msec */
-#define IAVF_BUSY_WAIT_DELAY 10
-#define IAVF_BUSY_WAIT_COUNT 50
-
/**
* iavf_send_pf_msg
* @adapter: adapter structure
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1e71b70f0e52..70335f6e8524 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2190,6 +2190,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
}
/**
+ * ice_set_phy_type_from_speed - set phy_types based on speeds
+ * and advertised modes
+ * @ks: ethtool link ksettings struct
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @adv_link_speed: targeted link speeds bitmap
+ */
+static void
+ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
+ u64 *phy_type_low, u64 *phy_type_high,
+ u16 adv_link_speed)
+{
+ /* Handle 1000M speed in a special way because ice_update_phy_type
+ * enables all link modes, but having mixed copper and optical
+ * standards is not supported.
+ */
+ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
+ ICE_PHY_TYPE_LOW_1G_SGMII;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
+ ICE_PHY_TYPE_LOW_1000BASE_LX;
+
+ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
+}
+
+/**
* ice_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
* @ks: ethtool ksettings
@@ -2320,7 +2356,8 @@ ice_set_link_ksettings(struct net_device *netdev,
adv_link_speed = curr_link_speed;
/* Convert the advertise link speeds to their corresponded PHY_TYPE */
- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
+ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
+ adv_link_speed);
if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -3470,6 +3507,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
new_rx = ch->combined_count + ch->rx_count;
new_tx = ch->combined_count + ch->tx_count;
+ if (new_rx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
+ if (new_tx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
if (new_rx > ice_get_max_rxq(pf)) {
netdev_err(dev, "Maximum allowed Rx channels is %d\n",
ice_get_max_rxq(pf));
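[ice_update_phy_type() expands a speed bit into every PHY type of that speed, which at 1G would mix copper (1000BASE-T), backplane (KX) and optical (SX/LX) modes. The new helper strips the 1G bit and ORs in only the PHY types matching the modes actually advertised. A sketch of the pattern, with hypothetical bit values standing in for the real ICE_* definitions:

    #include <stdint.h>

    #define SPEED_1000MB    (1u << 2)     /* hypothetical speed bit */
    #define PHY_1000BASE_T  (1ull << 0)   /* hypothetical PHY type bits */
    #define PHY_1G_SGMII    (1ull << 1)
    #define PHY_1000BASE_KX (1ull << 2)

    static void phy_type_from_speed(uint16_t *adv_speed, uint64_t *phy_low,
                                    int adv_1000base_t, int adv_1000base_kx)
    {
        /* take 1G out of the generic expansion; pick exact modes instead */
        *adv_speed &= (uint16_t)~SPEED_1000MB;

        if (adv_1000base_t)
            *phy_low |= PHY_1000BASE_T | PHY_1G_SGMII;
        if (adv_1000base_kx)
            *phy_low |= PHY_1000BASE_KX;
        /* the remaining speed bits still go through the generic table */
    }
]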
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index c73cdab44f70..ada5198b5b16 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -2639,7 +2639,7 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
*
* This function will either add or move a ptype to a particular PTG depending
* on if the ptype is already part of another group. Note that using a
- * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
static int
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 4f954db01b92..c9f7393b783d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -447,11 +447,9 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- if (lag->upper_netdev)
- dev_put(lag->upper_netdev);
+ dev_put(lag->upper_netdev);
- if (lag->peer_netdev)
- dev_put(lag->peer_netdev);
+ dev_put(lag->peer_netdev);
kfree(lag);
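[The ice_lag cleanup relies on dev_put() being a no-op for a NULL argument in recent kernels, so the guards around it were redundant, much as kfree(NULL) needs no check. A toy model of a NULL-tolerant put:

    #include <stdlib.h>

    struct netdev { int refcnt; };

    static void put_dev(struct netdev *dev)
    {
        if (!dev)
            return;               /* callers can drop their own NULL checks */
        if (--dev->refcnt == 0)
            free(dev);
    }
]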
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index b28fb8eacffb..a6c4be5e5566 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -912,7 +912,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
* @vsi: the VSI being configured
* @ctxt: VSI context structure
*/
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
@@ -985,7 +985,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
else
vsi->num_rxq = num_rxq_per_tc;
+ if (vsi->num_rxq > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+ vsi->num_rxq, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
vsi->num_txq = tx_count;
+ if (vsi->num_txq > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+ vsi->num_txq, vsi->alloc_txq);
+ return -EINVAL;
+ }
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1003,6 +1014,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
*/
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+
+ return 0;
}
/**
@@ -1190,7 +1203,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_CHNL) {
ice_chnl_vsi_setup_q_map(vsi, ctxt);
} else {
- ice_vsi_setup_q_map(vsi, ctxt);
+ ret = ice_vsi_setup_q_map(vsi, ctxt);
+ if (ret)
+ goto out;
+
if (!init_vsi) /* means VSI being updated */
/* must to indicate which section of VSI context are
* being modified
@@ -3467,7 +3483,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
*
* Prepares VSI tc_config to have queue configurations based on MQPRIO options.
*/
-static void
+static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u8 ena_tc)
{
@@ -3516,7 +3532,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
/* Set actual Tx/Rx queue pairs */
vsi->num_txq = offset + qcount_tx;
+ if (vsi->num_txq > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+ vsi->num_txq, vsi->alloc_txq);
+ return -EINVAL;
+ }
+
vsi->num_rxq = offset + qcount_rx;
+ if (vsi->num_rxq > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+ vsi->num_rxq, vsi->alloc_rxq);
+ return -EINVAL;
+ }
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
@@ -3534,6 +3561,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+
+ return 0;
}
/**
@@ -3583,9 +3612,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
else
- ice_vsi_setup_q_map(vsi, ctx);
+ ret = ice_vsi_setup_q_map(vsi, ctx);
+
+ if (ret)
+ goto out;
/* must to indicate which section of VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
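[ice_vsi_setup_q_map() and its mqprio variant change from void to int so an over-allocated queue count is reported instead of silently programming a bad VSI context; callers unwind through their existing error labels. A minimal sketch of the conversion shape:

    #include <errno.h>
    #include <stdio.h>

    static int setup_q_map(unsigned int num_rxq, unsigned int alloc_rxq)
    {
        if (num_rxq > alloc_rxq) {
            fprintf(stderr, "using %u Rx queues, only %u allocated\n",
                    num_rxq, alloc_rxq);
            return -EINVAL;       /* was a void function: no way to say no */
        }
        /* ... fill in the queue map ... */
        return 0;
    }

    static int vsi_init(unsigned int num_rxq, unsigned int alloc_rxq)
    {
        int ret = setup_q_map(num_rxq, alloc_rxq);

        if (ret)
            goto out;             /* propagate instead of continuing */
        /* ... write the VSI context ... */
    out:
        return ret;
    }
]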
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 3f64300b0e14..d4a0d089649c 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -43,6 +43,8 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
+ ICE_VLAN_EX,
+ ICE_VLAN_IN,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -109,13 +111,18 @@ enum ice_prot_id {
#define ICE_GRE_OF_HW 64
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
-#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
#define ICE_MDID_SIZE 2
+
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_VLAN_FLAG_MDID 20
+#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
+#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
+
#define ICE_TUN_FLAG_FV_IND 2
/* Mapping of software defined protocol ID to hardware defined protocol ID */
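[Metadata words are addressed by index, and each word is ICE_MDID_SIZE (2) bytes, so a metadata ID's byte offset is simply index times word size: the tunnel flags land at byte 42 and the new VLAN flags at byte 40. A compile-time check of that arithmetic:

    /* each metadata word is 2 bytes; byte offset = index * word size */
    #define MDID_SIZE      2
    #define TUN_FLAG_MDID  21
    #define VLAN_FLAG_MDID 20

    _Static_assert(MDID_SIZE * TUN_FLAG_MDID == 42, "tunnel flags at byte 42");
    _Static_assert(MDID_SIZE * VLAN_FLAG_MDID == 40, "VLAN flags at byte 40");
]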
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 8d8f3eec79ee..2d1274774987 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -31,16 +31,16 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
enum {
- ICE_PKT_VLAN = BIT(0),
- ICE_PKT_OUTER_IPV6 = BIT(1),
- ICE_PKT_TUN_GTPC = BIT(2),
- ICE_PKT_TUN_GTPU = BIT(3),
- ICE_PKT_TUN_NVGRE = BIT(4),
- ICE_PKT_TUN_UDP = BIT(5),
- ICE_PKT_INNER_IPV6 = BIT(6),
- ICE_PKT_INNER_TCP = BIT(7),
- ICE_PKT_INNER_UDP = BIT(8),
- ICE_PKT_GTP_NOPAY = BIT(9),
+ ICE_PKT_OUTER_IPV6 = BIT(0),
+ ICE_PKT_TUN_GTPC = BIT(1),
+ ICE_PKT_TUN_GTPU = BIT(2),
+ ICE_PKT_TUN_NVGRE = BIT(3),
+ ICE_PKT_TUN_UDP = BIT(4),
+ ICE_PKT_INNER_IPV6 = BIT(5),
+ ICE_PKT_INNER_TCP = BIT(6),
+ ICE_PKT_INNER_UDP = BIT(7),
+ ICE_PKT_GTP_NOPAY = BIT(8),
+ ICE_PKT_KMALLOC = BIT(9),
};
struct ice_dummy_pkt_offsets {
@@ -53,22 +53,42 @@ struct ice_dummy_pkt_profile {
const u8 *pkt;
u32 match;
u16 pkt_len;
+ u16 offsets_len;
};
-#define ICE_DECLARE_PKT_OFFSETS(type) \
- static const struct ice_dummy_pkt_offsets \
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
ice_dummy_##type##_packet_offsets[]
-#define ICE_DECLARE_PKT_TEMPLATE(type) \
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
static const u8 ice_dummy_##type##_packet[]
-#define ICE_PKT_PROFILE(type, m) { \
- .match = (m), \
- .pkt = ice_dummy_##type##_packet, \
- .pkt_len = sizeof(ice_dummy_##type##_packet), \
- .offsets = ice_dummy_##type##_packet_offsets, \
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+ .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
}
+ICE_DECLARE_PKT_OFFSETS(vlan) = {
+ { ICE_VLAN_OFOS, 12 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(vlan) = {
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+};
+
+ICE_DECLARE_PKT_OFFSETS(qinq) = {
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(qinq) = {
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+};
+
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -506,38 +526,6 @@ ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_UDP_ILOS, 38 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (801.1Q), IPv4:UDP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
-
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x11, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
@@ -570,41 +558,6 @@ ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_TCP_IL, 38 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (801.1Q), IPv4:TCP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
-
- 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x06, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -640,46 +593,6 @@ ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + TCP */
-ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_TCP_IL, 58 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
@@ -717,43 +630,6 @@ ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + UDP */
-ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_UDP_ILOS, 58 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
-
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
@@ -1271,14 +1147,9 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6),
ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
- ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
- ICE_PKT_VLAN),
ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
- ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
- ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
- ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
ICE_PKT_PROFILE(tcp, 0),
};
@@ -4609,6 +4480,8 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, { 0, 2, 4, 6 } },
{ ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
{ ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
+ { ICE_VLAN_EX, { 2, 0 } },
+ { ICE_VLAN_IN, { 2, 0 } },
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
@@ -4629,6 +4502,8 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_VLAN_EX, ICE_VLAN_OF_HW },
+ { ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
/**
@@ -5313,10 +5188,11 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* ice_add_special_words - Add words that are not protocols, such as metadata
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
+ * @dvm_ena: is double VLAN mode enabled
*/
static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
- struct ice_prot_lkup_ext *lkup_exts)
+ struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
{
u16 mask;
@@ -5335,6 +5211,19 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
}
}
+ if (rinfo->vlan_type != 0 && dvm_ena) {
+ if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ u8 word = lkup_exts->n_val_words++;
+
+ lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+ lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
+ lkup_exts->field_mask[word] =
+ ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
+ } else {
+ return -ENOSPC;
+ }
+ }
+
return 0;
}
@@ -5454,7 +5343,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, lkup_exts);
+ status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
goto err_free_lkup_exts;
@@ -5555,6 +5444,79 @@ err_free_lkup_exts:
}
/**
+ * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
+ *
+ * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
+ * @num_vlan: number of VLAN tags
+ */
+static struct ice_dummy_pkt_profile *
+ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
+ u32 num_vlan)
+{
+ struct ice_dummy_pkt_profile *profile;
+ struct ice_dummy_pkt_offsets *offsets;
+ u32 buf_len, off, etype_off, i;
+ u8 *pkt;
+
+ if (num_vlan < 1 || num_vlan > 2)
+ return ERR_PTR(-EINVAL);
+
+ off = num_vlan * VLAN_HLEN;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
+ dummy_pkt->offsets_len;
+ offsets = kzalloc(buf_len, GFP_KERNEL);
+ if (!offsets)
+ return ERR_PTR(-ENOMEM);
+
+ offsets[0] = dummy_pkt->offsets[0];
+ if (num_vlan == 2) {
+ offsets[1] = ice_dummy_qinq_packet_offsets[0];
+ offsets[2] = ice_dummy_qinq_packet_offsets[1];
+ } else if (num_vlan == 1) {
+ offsets[1] = ice_dummy_vlan_packet_offsets[0];
+ }
+
+ for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
+ offsets[i + num_vlan].offset =
+ dummy_pkt->offsets[i].offset + off;
+ }
+ offsets[i + num_vlan] = dummy_pkt->offsets[i];
+
+ etype_off = dummy_pkt->offsets[1].offset;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
+ dummy_pkt->pkt_len;
+ pkt = kzalloc(buf_len, GFP_KERNEL);
+ if (!pkt) {
+ kfree(offsets);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(pkt, dummy_pkt->pkt, etype_off);
+ memcpy(pkt + etype_off,
+ num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
+ off);
+ memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
+ dummy_pkt->pkt_len - etype_off);
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile) {
+ kfree(offsets);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ profile->offsets = offsets;
+ profile->pkt = pkt;
+ profile->pkt_len = buf_len;
+ profile->match |= ICE_PKT_KMALLOC;
+
+ return profile;
+}
+
+/**
* ice_find_dummy_packet - find dummy packet
*
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5569,7 +5531,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
enum ice_sw_tunnel_type tun_type)
{
const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
- u32 match = 0;
+ u32 match = 0, vlan_count = 0;
u16 i;
switch (tun_type) {
@@ -5597,8 +5559,11 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
match |= ICE_PKT_OUTER_IPV6;
- else if (lkups[i].type == ICE_VLAN_OFOS)
- match |= ICE_PKT_VLAN;
+ else if (lkups[i].type == ICE_VLAN_OFOS ||
+ lkups[i].type == ICE_VLAN_EX)
+ vlan_count++;
+ else if (lkups[i].type == ICE_VLAN_IN)
+ vlan_count++;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
@@ -5620,6 +5585,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
while (ret->match && (match & ret->match) != ret->match)
ret++;
+ if (vlan_count != 0)
+ ret = ice_dummy_packet_add_vlan(ret, vlan_count);
+
return ret;
}
@@ -5678,6 +5646,8 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
+ case ICE_VLAN_EX:
+ case ICE_VLAN_IN:
len = sizeof(struct ice_vlan_hdr);
break;
case ICE_IPV4_OFOS:
@@ -5783,6 +5753,36 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
/**
+ * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
+ * @vlan_type: VLAN tag type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static int
+ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u16 i;
+
+ /* Find VLAN header and insert VLAN TPID */
+ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ if (offsets[i].type == ICE_VLAN_OFOS ||
+ offsets[i].type == ICE_VLAN_EX) {
+ struct ice_vlan_hdr *hdr;
+ u16 offset;
+
+ offset = offsets[i].offset;
+ hdr = (struct ice_vlan_hdr *)&pkt[offset];
+ hdr->type = cpu_to_be16(vlan_type);
+
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+/**
* ice_find_adv_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5817,6 +5817,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
rinfo->tun_type == list_itr->rule_info.tun_type &&
+ rinfo->vlan_type == list_itr->rule_info.vlan_type &&
lkups_matched)
return list_itr;
}
@@ -5993,16 +5994,22 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* locate a dummy packet */
profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return -EIO;
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ status = -EIO;
+ goto free_pkt_profile;
+ }
vsi_handle = rinfo->sw_act.vsi_handle;
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return -EINVAL;
+ if (!ice_is_vsi_valid(hw, vsi_handle)) {
+ status = -EINVAL;
+ goto free_pkt_profile;
+ }
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -6012,7 +6019,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
if (status)
- return status;
+ goto free_pkt_profile;
m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
if (m_entry) {
/* we have to add VSI to VSI_LIST and increment vsi_count.
@@ -6031,12 +6038,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
}
- return status;
+ goto free_pkt_profile;
}
rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ if (!s_rule) {
+ status = -ENOMEM;
+ goto free_pkt_profile;
+ }
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -6105,6 +6114,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto err_ice_add_adv_rule;
}
+ if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
+ status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
+ s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+ }
+
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
NULL);
@@ -6150,6 +6167,13 @@ err_ice_add_adv_rule:
kfree(s_rule);
+free_pkt_profile:
+ if (profile->match & ICE_PKT_KMALLOC) {
+ kfree(profile->offsets);
+ kfree(profile->pkt);
+ kfree(profile);
+ }
+
return status;
}
@@ -6342,7 +6366,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, &lkup_exts);
+ status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
if (status)
return status;
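[Instead of carrying a pre-built VLAN variant of every dummy packet, ice_dummy_packet_add_vlan() now builds one at run time: copy the bytes up to the EtherType offset, splice in one (802.1Q) or two (QinQ) 4-byte tags, copy the rest, and flag the profile ICE_PKT_KMALLOC so the heap copies get freed. A minimal standalone sketch of the splice itself:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define VLAN_HLEN 4

    /* returns a heap copy of pkt with num_vlan tags inserted at etype_off */
    static uint8_t *splice_vlan(const uint8_t *pkt, size_t len, size_t etype_off,
                                const uint8_t *tags, unsigned int num_vlan,
                                size_t *out_len)
    {
        size_t ins = (size_t)num_vlan * VLAN_HLEN;
        uint8_t *out = malloc(len + ins);

        if (!out)
            return NULL;
        memcpy(out, pkt, etype_off);                      /* MAC header */
        memcpy(out + etype_off, tags, ins);               /* VLAN / QinQ */
        memcpy(out + etype_off + ins, pkt + etype_off,
               len - etype_off);                          /* rest of frame */
        *out_len = len + ins;
        return out;
    }
]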
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index eb641e5512d2..59488e3e9d6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -192,6 +192,7 @@ struct ice_adv_rule_info {
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
+ u16 vlan_type;
struct ice_adv_rule_flags_info flags_info;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 0a0c55fb8699..14795157846b 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -50,6 +50,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_VLAN)
lkups_cnt++;
+ /* is CVLAN specified? */
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ lkups_cnt++;
+
/* are IPv[4|6] fields specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
@@ -134,6 +138,18 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
}
}
+static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
+{
+ switch (vlan_tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ case ETH_P_QINQ1:
+ return vlan_tpid;
+ default:
+ return 0;
+ }
+}
+
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
struct ice_adv_lkup_elem *list)
@@ -269,8 +285,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
+ u16 vlan_tpid = 0;
int i = 0;
+ rule_info->vlan_type = vlan_tpid;
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
@@ -311,12 +330,26 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* copy VLAN info */
if (flags & ICE_TC_FLWR_FIELD_VLAN) {
- list[i].type = ICE_VLAN_OFOS;
+ vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
+ rule_info->vlan_type =
+ ice_check_supported_vlan_tpid(vlan_tpid);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ list[i].type = ICE_VLAN_EX;
+ else
+ list[i].type = ICE_VLAN_OFOS;
list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].type = ICE_VLAN_IN;
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ i++;
+ }
+
/* copy L3 (IPv[4|6]: src, dest) address */
if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
ICE_TC_FLWR_FIELD_SRC_IPV4)) {
@@ -524,6 +557,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
*/
fltr->rid = rule_added.rid;
fltr->rule_id = rule_added.rule_id;
+ fltr->dest_id = rule_added.vsi_handle;
exit:
kfree(list);
@@ -944,6 +978,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
@@ -993,7 +1028,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
n_proto_key = ntohs(match.key->n_proto);
n_proto_mask = ntohs(match.mask->n_proto);
- if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
+ fltr->tunnel_type == TNL_GTPU ||
+ fltr->tunnel_type == TNL_GTPC) {
n_proto_key = 0;
n_proto_mask = 0;
} else {
@@ -1057,6 +1094,34 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
if (match.mask->vlan_priority)
headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_tpid)
+ headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan match;
+
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
+ return -EINVAL;
+ }
+
+ flow_rule_match_cvlan(rule, &match);
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Bad CVLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
+ if (match.mask->vlan_priority)
+ headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -1191,7 +1256,7 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
vsi->netdev->dev_addr);
- memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
}
/* validate specified dest MAC address, make sure either it belongs to
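[The TC parser above records the outer TPID and filters it through ice_check_supported_vlan_tpid(), accepting only the three standard tag protocol IDs; anything else maps to 0 and no TPID match is programmed. The same filter, standalone:

    #include <stdint.h>

    #define ETH_P_8021Q  0x8100   /* values as in include/uapi/linux/if_ether.h */
    #define ETH_P_8021AD 0x88A8
    #define ETH_P_QINQ1  0x9100

    static uint16_t check_supported_tpid(uint16_t tpid)
    {
        switch (tpid) {
        case ETH_P_8021Q:
        case ETH_P_8021AD:
        case ETH_P_QINQ1:
            return tpid;          /* hardware can match on these */
        default:
            return 0;             /* unsupported: don't set a VLAN type */
        }
    }
]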
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index e25e958f4396..0193874cd203 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -23,6 +23,7 @@
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -40,6 +41,7 @@ struct ice_tc_flower_action {
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_tpid;
};
struct ice_tc_l2_hdr {
@@ -81,6 +83,7 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_l2_hdr l2_key;
struct ice_tc_l2_hdr l2_mask;
struct ice_tc_vlan_hdr vlan_hdr;
+ struct ice_tc_vlan_hdr cvlan_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index 1b618de592b7..bcda2e004807 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -199,7 +199,6 @@ static bool ice_is_dvm_supported(struct ice_hw *hw)
#define ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX 2
#define ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX 2
#define ICE_PKT_FLAGS_0_TO_15_FV_IDX 1
-#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
{
/* Update recipe ICE_SW_LKUP_VLAN to filter based on the
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index cbe92fd23a70..8d6e44ee1895 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2207,7 +2207,7 @@ out:
* igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
- * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1277c5c7d099..205d577bdbba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -854,7 +854,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 68be2976f539..4f91a85fe0cc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1945,7 +1945,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
* However, when we do so, no frame from queue 2 and 3 are
* transmitted. It seems the MAX_TPKT_SIZE should not be great
* or _equal_ to the buffer size programmed in TXPBS. For this
- * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64.
+ * reason, we set MAX_ TPKT_SIZE to (4kB - 1) / 64.
*/
val = (4096 - 1) / 64;
wr32(E1000_I210_DTXMXPKTSZ, val);
@@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ /* Free all the Tx ring sk_buffs or xdp frames */
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ dev_kfree_skb_any(tx_buffer->skb);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -9519,7 +9522,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
igb_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -9898,11 +9901,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
struct e1000_hw *hw = &adapter->hw;
u32 dmac_thr;
u16 hwm;
+ u32 reg;
if (hw->mac.type > e1000_82580) {
if (adapter->flags & IGB_FLAG_DMAC) {
- u32 reg;
-
/* force threshold to 0. */
wr32(E1000_DMCTXTH, 0);
@@ -9935,7 +9937,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* Disable BMC-to-OS Watchdog Enable */
if (hw->mac.type != e1000_i354)
reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
-
wr32(E1000_DMACR, reg);
/* no lower threshold to disable
@@ -9952,12 +9953,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
*/
wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
(IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+ }
- /* make low power state decision controlled
- * by DMA coal
- */
+ if (hw->mac.type >= e1000_i210 ||
+ (adapter->flags & IGB_FLAG_DMAC)) {
reg = rd32(E1000_PCIEMISC);
- reg &= ~E1000_PCIEMISC_LX_DECISION;
+ reg |= E1000_PCIEMISC_LX_DECISION;
wr32(E1000_PCIEMISC, reg);
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
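[igb_clean_tx_ring() used to free every buffer as an sk_buff; with XDP a slot may instead hold an xdp_frame, so the free path has to dispatch on the buffer's type tag. The pattern as a toy tagged union (the printed calls only model the real free functions):

    #include <stdio.h>

    enum buf_type { BUF_SKB, BUF_XDP };

    struct tx_buffer {
        enum buf_type type;              /* tag: which union member is live */
        union {
            void *skb;
            void *xdpf;
        };
    };

    static void free_tx_buffer(const struct tx_buffer *b)
    {
        if (b->type == BUF_SKB)
            printf("dev_kfree_skb_any(%p)\n", b->skb);
        else
            printf("xdp_return_frame(%p)\n", b->xdpf);
    }
]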
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 975eb47ee04d..57d39ee00b58 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -227,7 +227,7 @@ struct igbvf_adapter {
/* The VF counters don't clear on read so we have to get a base
* count on driver start up and always subtract that base on
- * on the first update, thus the flag..
+ * the first update, thus the flag..
*/
struct e1000_vf_stats stats;
u64 zero_base;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 43ced78c3a2e..f4e91db89fe5 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2537,7 +2537,7 @@ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
igbvf_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 67b8ffd21d8a..a5c4b19d71a2 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -193,7 +193,7 @@ s32 igc_force_mac_fc(struct igc_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 653e9f1e35b5..8dbb9f903ca7 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -15,7 +15,6 @@
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
#define IGC_PTM_STAT_SLEEP 2
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index affdefcca7e3..45be9a1ab6af 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1187,7 +1187,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
iph = ip_hdr(skb);
iph->tot_len = 0;
@@ -1704,7 +1704,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
netdev->stats.tx_window_errors = 0;
}
-#define IXGB_MAX_INTR 10
/**
* ixgb_intr - Interrupt Handler
* @irq: interrupt number
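[Several drivers in this series replace the open-coded skb_transport_offset() + tcp_hdrlen() sum with the skb_tcp_all_headers() helper; the value is the byte count of everything up to and including the TCP header. Modeled with plain integers:

    #include <assert.h>

    struct skb_model {
        int transport_offset;    /* L2 + L3 headers before TCP */
        int tcp_doff;            /* TCP data offset, in 32-bit words */
    };

    static int tcp_all_headers(const struct skb_model *skb)
    {
        return skb->transport_offset + skb->tcp_doff * 4;
    }

    int main(void)
    {
        struct skb_model skb = { .transport_offset = 14 + 20, .tcp_doff = 5 };

        assert(tcp_all_headers(&skb) == 54);   /* Ethernet + IPv4 + TCP */
        return 0;
    }
]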
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index f0cadd532c53..d40f96250691 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -141,8 +141,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define MAX_RDTR 0xFFFF
#define MIN_RDTR 0
-#define XSUMRX_DEFAULT OPTION_ENABLED
-
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 72e6ebffea33..e85f7d2e8810 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -8,12 +8,10 @@
#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
-#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 628d0eb0599f..04f453eabef6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -18,8 +18,6 @@
#include "ixgbe_phy.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBE_STATS};
struct ixgbe_stats {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5c62e9963650..0493326a0d6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5161,7 +5161,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
}
/**
- * ixgbe_lpbthresh - calculate low water mark for for flow control
+ * ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 336426a67ac1..27a71fa26d3c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -138,7 +138,6 @@
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
-#define MAX_TIMADJ 0x7FFFFFFF
/**
* ixgbe_ptp_setup_sdp_X540
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index e4b50c7781ff..35c2b9b8bd19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1737,7 +1737,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for native SFP support.
+ * Configure the integrated PHY for native SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
@@ -1786,7 +1786,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for SFP support.
+ * Configure the integrated PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 3b41f83c8dff..fed46872af2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -17,8 +17,6 @@
#include "ixgbevf.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBEVF_STATS};
struct ixgbe_stats {
@@ -130,8 +128,6 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 55b87bc3a938..2f12fbe229c1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4787,7 +4787,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
rtnl_unlock();
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 68fc32e36e88..1641d00d8ed3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -964,7 +964,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /* if we we didn't get an ACK there must have been
+ /* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 57eff4e9e6de..b6be0552a6c1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -775,7 +775,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
int tx_index;
struct tx_desc *desc;
int ret;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 384f5a16753d..0caa2df87c04 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2664,8 +2664,8 @@ err_drop_frame:
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ int hdr_len = skb_tcp_all_headers(skb);
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2727,7 +2727,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
if ((txq->count + tso_count_descs(skb)) >= txq->size)
return 0;
- if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
pr_info("*** Is this even possible?\n");
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index cc51149790ff..3d5d39a52fe6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -52,7 +52,7 @@
#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
-#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) && 0xFF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
/* SDP Function select */
#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
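[The octep fix above is a one-character bug worth noting: `&&` is a logical AND, so the buggy macro collapsed any nonzero VF count to 1 instead of extracting the 8-bit field. Demonstrated:

    #include <assert.h>
    #include <stdint.h>

    #define NVFS_BUGGY(val) (((val) >> 48) && 0xFF)  /* logical AND: 0 or 1 */
    #define NVFS_FIXED(val) (((val) >> 48) & 0xFF)   /* bitwise AND: the field */

    int main(void)
    {
        uint64_t rinfo = (uint64_t)8 << 48;          /* 8 VFs in bits 48..55 */

        assert(NVFS_BUGGY(rinfo) == 1);
        assert(NVFS_FIXED(rinfo) == 8);
        return 0;
    }
]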
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 25491edc35ce..931a1a7ebf76 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -847,6 +847,11 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ /* Disable all PFC classes by default */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
@@ -899,6 +904,7 @@ int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
return 0;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
if (rx_pause) {
cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
@@ -910,12 +916,13 @@ int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
CGXX_SMUX_CBFC_CTL_DRP_EN);
}
- if (tx_pause)
+ if (tx_pause) {
cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
- else
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ } else {
cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
-
- cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ }
cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
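[cgx_lmac_pfc_config() now does a true read-modify-write on the class-enable field: it ORs the classes already enabled in hardware into pfc_en before writing, and clears the field when Tx pause is turned off, so enabling one priority no longer wipes out the others. A sketch with hand-rolled stand-ins for the driver's FIELD_GET/FIELD_SET helpers and a hypothetical field mask:

    #include <assert.h>
    #include <stdint.h>

    #define CLASS_MASK 0xFF00ull      /* hypothetical class-enable field */

    static uint64_t field_get(uint64_t mask, uint64_t reg)
    {
        return (reg & mask) >> __builtin_ctzll(mask);
    }

    static uint64_t field_set(uint64_t mask, uint64_t val, uint64_t reg)
    {
        return (reg & ~mask) | ((val << __builtin_ctzll(mask)) & mask);
    }

    int main(void)
    {
        uint64_t cfg = field_set(CLASS_MASK, 0x0F, 0); /* classes 0-3 on */
        uint64_t pfc_en = 0x30;                        /* request 4 and 5 */

        pfc_en |= field_get(CLASS_MASK, cfg);          /* keep what's set */
        cfg = field_set(CLASS_MASK, pfc_en, cfg);
        assert(field_get(CLASS_MASK, cfg) == 0x3F);
        return 0;
    }
]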
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 47e83d7a5804..05666922a45b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -276,6 +276,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable all PFC classes */
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -387,15 +392,14 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
{
rpm_t *rpm = rpmd;
- u64 cfg;
+ u64 cfg, class_en;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
- /* reset PFC class quanta and threshold */
- rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
-
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
@@ -410,9 +414,11 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (tx_pause) {
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en);
} else {
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false);
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en);
}
if (!rx_pause && !tx_pause)
@@ -422,9 +428,7 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
- cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
- rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 9ab8d49dd180..8205f2626f61 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -48,7 +48,6 @@
#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
@@ -70,7 +69,7 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPM_DEFAULT_PAUSE_TIME 0x7FF
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a9da85e418a4..38bbae5d9ae0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
-#define CPT_CTX_ILEN 2
+#define CPT_CTX_ILEN 2ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -480,7 +480,7 @@ static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
*/
if (!is_rvu_otx2(rvu)) {
val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
- val |= rvu->hw->cpt_chan_base;
+ val |= (u64)rvu->hw->cpt_chan_base;
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
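[Both hunks here are width fixes: CPT_CTX_ILEN becomes 2ULL and cpt_chan_base is cast to u64, so the subsequent shifts and ORs happen in 64-bit arithmetic. When the left operand of a shift is 32-bit, the shift is performed in 32 bits and the high bits are silently lost:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t base = 0x8000u;

        uint64_t lost = base << 20;            /* 32-bit shift: wraps to 0 */
        uint64_t kept = (uint64_t)base << 20;  /* 64-bit shift: bit 35 set */

        assert(lost == 0);
        assert(kept == 0x800000000ull);
        return 0;
    }
]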
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 3a31fb8cc155..e05fd2b9a929 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2534,7 +2534,7 @@ alloc:
/* Copy MCAM entry indices into mbox response entry_list.
* Requester always expects indices in ascending order, so
- * so reverse the list if reverse bitmap is used for allocation.
+ * reverse the list if reverse bitmap is used for allocation.
*/
if (!req->contig && rsp->count) {
index = 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 3baeafc40807..a18e8efd0f1e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -624,7 +624,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->subdc = NIX_SUBDC_EXT;
if (skb_shinfo(skb)->gso_size) {
ext->lso = 1;
- ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_sb = skb_tcp_all_headers(skb);
ext->lso_mps = skb_shinfo(skb)->gso_size;
/* Only TSOv4 and TSOv6 GSO offloads are supported */
@@ -931,7 +931,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
* be correctly modified, hence don't offload such TSO segments.
*/
- payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ payload_len = skb->len - skb_tcp_all_headers(skb);
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
return false;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 6f754ae2a584..0bb46eee46b4 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -107,7 +107,8 @@ struct prestera_port_phy_config {
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
- struct prestera_flow_block *flow_block;
+ struct prestera_flow_block *ingress_flow_block;
+ struct prestera_flow_block *egress_flow_block;
struct devlink_port dl_port;
struct list_head lag_member;
struct prestera_lag *lag;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 3a141f2db812..3d4b85f2d541 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -61,6 +61,7 @@ struct prestera_acl_ruleset {
u32 index;
u16 pcl_id;
bool offload;
+ bool ingress;
};
struct prestera_acl_vtcam {
@@ -70,6 +71,7 @@ struct prestera_acl_vtcam {
u32 id;
bool is_keymask_set;
u8 lookup;
+ u8 direction;
};
static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
@@ -93,23 +95,36 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
.automatic_shrinking = true,
};
-int prestera_acl_chain_to_client(u32 chain_index, u32 *client)
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client)
{
- static const u32 client_map[] = {
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2
+ static const u32 ingress_client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2
};
- if (chain_index >= ARRAY_SIZE(client_map))
+ if (!ingress) {
+ /* prestera supports only one chain on egress */
+ if (chain_index > 0)
+ return -EINVAL;
+
+ *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
+ return 0;
+ }
+
+ if (chain_index >= ARRAY_SIZE(ingress_client_map))
return -EINVAL;
- *client = client_map[chain_index];
+ *client = ingress_client_map[chain_index];
return 0;
}
-static bool prestera_acl_chain_is_supported(u32 chain_index)
+static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress)
{
+ if (!ingress)
+ /* prestera supports only one chain on egress */
+ return chain_index == 0;
+
return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
}
@@ -122,7 +137,7 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
u32 uid = 0;
int err;
- if (!prestera_acl_chain_is_supported(chain_index))
+ if (!prestera_acl_chain_is_supported(chain_index, block->ingress))
return ERR_PTR(-EINVAL);
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
@@ -130,6 +145,7 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
return ERR_PTR(-ENOMEM);
ruleset->acl = acl;
+ ruleset->ingress = block->ingress;
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
refcount_set(&ruleset->refcount, 1);
@@ -172,13 +188,18 @@ int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
struct prestera_acl_iface iface;
u32 vtcam_id;
+ int dir;
int err;
+ dir = ruleset->ingress ?
+ PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS;
+
if (ruleset->offload)
return -EEXIST;
err = prestera_acl_vtcam_id_get(ruleset->acl,
ruleset->ht_key.chain_index,
+ dir,
ruleset->keymask, &vtcam_id);
if (err)
goto err_vtcam_create;
@@ -719,7 +740,7 @@ vtcam_found:
return 0;
}
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id)
{
struct prestera_acl_vtcam *vtcam;
@@ -731,7 +752,8 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
* fine for now
*/
list_for_each_entry(vtcam, &acl->vtcam_list, list) {
- if (lookup != vtcam->lookup)
+ if (lookup != vtcam->lookup ||
+ dir != vtcam->direction)
continue;
if (!keymask && !vtcam->is_keymask_set) {
@@ -752,7 +774,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return -ENOMEM;
err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
- PRESTERA_HW_VTCAM_DIR_INGRESS);
+ dir);
if (err) {
kfree(vtcam);
@@ -765,6 +787,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return 0;
}
+ vtcam->direction = dir;
vtcam->id = new_vtcam_id;
vtcam->lookup = lookup;
if (keymask) {
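
The direction-aware client mapping above pairs with the counter-client values added in the prestera_hw.h hunk later in this patch (three ingress lookups, one egress lookup). Below is a minimal userspace model of prestera_acl_chain_to_client() for experimenting with the mapping; the enum values mirror the prestera_hw.h hunk, everything else is illustrative scaffolding, not driver code:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

enum {
	PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0,
	PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1,
	PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2,
	PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3,
};

static int chain_to_client(uint32_t chain_index, bool ingress, uint32_t *client)
{
	static const uint32_t ingress_client_map[] = {
		PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
		PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
		PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2,
	};

	if (!ingress) {
		/* only chain 0 exists on egress */
		if (chain_index > 0)
			return -EINVAL;
		*client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
		return 0;
	}

	if (chain_index >= sizeof(ingress_client_map) / sizeof(ingress_client_map[0]))
		return -EINVAL;

	*client = ingress_client_map[chain_index];
	return 0;
}

int main(void)
{
	uint32_t client;

	chain_to_client(1, true, &client);
	printf("ingress chain 1 -> client %u\n", (unsigned)client);
	printf("egress chain 1  -> %d (rejected)\n",
	       chain_to_client(1, false, &client));
	return 0;
}
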
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index f963e1e0c0f0..03fc5b9dc925 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -199,9 +199,9 @@ void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id);
int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
-int prestera_acl_chain_to_client(u32 chain_index, u32 *client);
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
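
One consequence of threading the new dir argument through prestera_acl_vtcam_id_get() is that cached vtcams are now keyed by (lookup, direction) rather than lookup alone, so an egress ruleset can never be folded into an ingress vtcam. A toy model of the reuse check, with 0/1 standing in for PRESTERA_HW_VTCAM_DIR_INGRESS/_EGRESS:

#include <stdbool.h>
#include <stdio.h>

struct vtcam { unsigned char lookup, direction; };

static bool vtcam_reusable(const struct vtcam *v,
			   unsigned char lookup, unsigned char dir)
{
	/* both the lookup level and the direction must match */
	return v->lookup == lookup && v->direction == dir;
}

int main(void)
{
	struct vtcam cached = { .lookup = 0, .direction = 0 /* ingress */ };

	printf("ingress lookup 0: %s\n",
	       vtcam_reusable(&cached, 0, 0) ? "reuse" : "create new");
	printf("egress  lookup 0: %s\n",
	       vtcam_reusable(&cached, 0, 1) ? "reuse" : "create new");
	return 0;
}
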
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index 05c3ad98eba9..2262693bd5cf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -75,7 +75,9 @@ static void prestera_flow_block_destroy(void *cb_priv)
}
static struct prestera_flow_block *
-prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
+prestera_flow_block_create(struct prestera_switch *sw,
+ struct net *net,
+ bool ingress)
{
struct prestera_flow_block *block;
@@ -87,6 +89,7 @@ prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
+ block->ingress = ingress;
return block;
}
@@ -165,7 +168,8 @@ static int prestera_flow_block_unbind(struct prestera_flow_block *block,
static struct prestera_flow_block *
prestera_flow_block_get(struct prestera_switch *sw,
struct flow_block_offload *f,
- bool *register_block)
+ bool *register_block,
+ bool ingress)
{
struct prestera_flow_block *block;
struct flow_block_cb *block_cb;
@@ -173,7 +177,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
block_cb = flow_block_cb_lookup(f->block,
prestera_flow_block_cb, sw);
if (!block_cb) {
- block = prestera_flow_block_create(sw, f->net);
+ block = prestera_flow_block_create(sw, f->net, ingress);
if (!block)
return ERR_PTR(-ENOMEM);
@@ -209,7 +213,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block)
}
static int prestera_setup_flow_block_bind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -217,7 +221,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
bool register_block;
int err;
- block = prestera_flow_block_get(sw, f, &register_block);
+ block = prestera_flow_block_get(sw, f, &register_block, ingress);
if (IS_ERR(block))
return PTR_ERR(block);
@@ -232,7 +236,11 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
}
- port->flow_block = block;
+ if (ingress)
+ port->ingress_flow_block = block;
+ else
+ port->egress_flow_block = block;
+
return 0;
err_block_bind:
@@ -242,7 +250,7 @@ err_block_bind:
}
static void prestera_setup_flow_block_unbind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -266,24 +274,38 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
list_del(&block_cb->driver_list);
}
error:
- port->flow_block = NULL;
+ if (ingress)
+ port->ingress_flow_block = NULL;
+ else
+ port->egress_flow_block = NULL;
}
-int prestera_flow_block_setup(struct prestera_port *port,
- struct flow_block_offload *f)
+static int prestera_setup_flow_block_clsact(struct prestera_port *port,
+ struct flow_block_offload *f,
+ bool ingress)
{
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
f->driver_block_list = &prestera_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return prestera_setup_flow_block_bind(port, f);
+ return prestera_setup_flow_block_bind(port, f, ingress);
case FLOW_BLOCK_UNBIND:
- prestera_setup_flow_block_unbind(port, f);
+ prestera_setup_flow_block_unbind(port, f, ingress);
return 0;
default:
return -EOPNOTSUPP;
}
}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ switch (f->binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ return prestera_setup_flow_block_clsact(port, f, true);
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ return prestera_setup_flow_block_clsact(port, f, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 6550278b166a..0c9e13263261 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -23,6 +23,7 @@ struct prestera_flow_block {
struct flow_block_cb *block_cb;
struct list_head template_list;
unsigned int rule_count;
+ bool ingress;
};
int prestera_flow_block_setup(struct prestera_port *port,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index d43e503c644f..a54748ac6541 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -79,7 +79,7 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
/* setup counter first */
rule->re_arg.count.valid = true;
- err = prestera_acl_chain_to_client(chain_index,
+ err = prestera_acl_chain_to_client(chain_index, block->ingress,
&rule->re_arg.count.client);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 579d9ba23ffc..aa74f668aa3c 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -123,9 +123,10 @@ enum prestera_hw_vtcam_direction_t {
};
enum {
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3,
};
struct prestera_switch;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index a1e907c85217..bbea5458000b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1863,7 +1863,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (mss != 0) {
if (!(hw->flags & SKY2_HW_NEW_LE))
- mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss += skb_tcp_all_headers(skb);
if (mss != sky2->tx_last_mss) {
le = get_tx_le(sky2, &slot);
@@ -4711,7 +4711,7 @@ static irqreturn_t sky2_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int sky2_test_msi(struct sky2_hw *hw)
{
struct pci_dev *pdev = hw->pdev;
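
Both this sky2 hunk and the mlx4 hunk further down replace open-coded header arithmetic with the skb_tcp_all_headers() helpers. To my understanding these helpers are thin wrappers; a sketch of their definitions, not copied from include/linux/tcp.h, so verify against the tree this series targets:

static inline int skb_tcp_all_headers_sketch(const struct sk_buff *skb)
{
	/* MAC + IP + TCP header bytes, counted from skb->data */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers_sketch(const struct sk_buff *skb)
{
	/* same idea, for the inner headers of encapsulated traffic */
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
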
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 95839fd84dab..3f0e5e64de50 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
@@ -32,6 +33,7 @@
#define MTK_STAR_SKB_ALIGNMENT 16
#define MTK_STAR_HASHTABLE_MC_LIMIT 256
#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
* work for this controller.
@@ -129,6 +131,11 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_INT_MASK 0x0054
#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+/* Delay-Macro Register */
+#define MTK_STAR_REG_TEST0 0x0058
+#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
+#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
+
/* Misc. Config Register */
#define MTK_STAR_REG_TEST1 0x005c
#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
@@ -149,6 +156,7 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+#define MTK_STAR_BIT_CLK_DIV_50 0x32
/* Counter registers. */
#define MTK_STAR_REG_C_RXOKPKT 0x0100
@@ -181,9 +189,14 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_C_RX_TWIST 0x0218
/* Ethernet CFG Control */
-#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
-#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
-#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
+#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
+#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
+#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
+#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
/* Represents the actual structure of descriptors used by the MAC. We can
* reuse the same structure for both TX and RX - the layout is the same, only
@@ -216,7 +229,8 @@ struct mtk_star_ring_desc_data {
struct sk_buff *skb;
};
-#define MTK_STAR_RING_NUM_DESCS 128
+#define MTK_STAR_RING_NUM_DESCS 512
+#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
@@ -231,6 +245,11 @@ struct mtk_star_ring {
unsigned int tail;
};
+struct mtk_star_compat {
+ int (*set_interface_mode)(struct net_device *ndev);
+ unsigned char bit_clk_div;
+};
+
struct mtk_star_priv {
struct net_device *ndev;
@@ -246,7 +265,8 @@ struct mtk_star_priv {
struct mtk_star_ring rx_ring;
struct mii_bus *mii;
- struct napi_struct napi;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
struct device_node *phy_node;
phy_interface_t phy_intf;
@@ -255,6 +275,11 @@ struct mtk_star_priv {
int speed;
int duplex;
int pause;
+ bool rmii_rxc;
+ bool rx_inv;
+ bool tx_inv;
+
+ const struct mtk_star_compat *compat_data;
/* Protects against concurrent descriptor access. */
spinlock_t lock;
@@ -357,19 +382,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
mtk_star_ring_push_head(ring, desc_data, flags);
}
-static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
{
- return abs(ring->head - ring->tail);
-}
+ u32 avail;
-static bool mtk_star_ring_full(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
-}
+ if (ring->tail > ring->head)
+ avail = ring->tail - ring->head - 1;
+ else
+ avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
-static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) > 0;
+ return avail;
}
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
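
The replacement mtk_star_tx_ring_avail() deliberately keeps one descriptor slot unused, so head == tail always means "empty" rather than "full". A standalone model with the 512-entry ring size from this patch; the asserts encode the invariants:

#include <assert.h>
#include <stdio.h>

#define NUM_DESCS 512 /* MTK_STAR_RING_NUM_DESCS after this patch */

static unsigned int tx_ring_avail(unsigned int head, unsigned int tail)
{
	if (tail > head)
		return tail - head - 1;
	return NUM_DESCS - head + tail - 1;
}

int main(void)
{
	assert(tx_ring_avail(0, 0) == NUM_DESCS - 1);	/* empty ring  */
	assert(tx_ring_avail(4, 5) == 0);		/* full ring   */
	assert(tx_ring_avail(510, 2) == 3);		/* wrap-around */
	printf("all ring-avail invariants hold\n");
	return 0;
}
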
@@ -414,6 +436,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
+static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value &= ~MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value &= ~MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value |= MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value |= MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
@@ -429,20 +481,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
-static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
-{
- unsigned int val;
-
- regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
-
- return val;
-}
-
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
unsigned int val;
- val = mtk_star_intr_read(priv);
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
return val;
@@ -714,25 +757,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
-/* All processing for TX and RX happens in the napi poll callback.
- *
- * FIXME: The interrupt handling should be more fine-grained with each
- * interrupt enabled/disabled independently when needed. Unfortunatly this
- * turned out to impact the driver's stability and until we have something
- * working properly, we're disabling all interrupts during TX & RX processing
- * or when resetting the counter registers.
- */
+/**
+ * mtk_star_handle_irq - Interrupt Handler.
+ * @irq: interrupt number.
+ * @data: pointer to a network interface device structure.
+ * Description: this is the driver interrupt service routine.
+ * It mainly handles:
+ * 1. TX complete interrupt for frame transmission.
+ * 2. RX complete interrupt for frame reception.
+ * 3. MAC Management Counter interrupt to avoid counter overflow.
+ */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
- struct mtk_star_priv *priv;
- struct net_device *ndev;
-
- ndev = data;
- priv = netdev_priv(ndev);
+ struct net_device *ndev = data;
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int intr_status = mtk_star_intr_ack_all(priv);
+ bool rx, tx;
+
+ rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
+ napi_schedule_prep(&priv->rx_napi);
+ tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
+ napi_schedule_prep(&priv->tx_napi);
+
+ if (rx || tx) {
+ spin_lock(&priv->lock);
+ /* mask RX and TX complete interrupts */
+ mtk_star_disable_dma_irq(priv, rx, tx);
+ spin_unlock(&priv->lock);
+
+ if (rx)
+ __napi_schedule(&priv->rx_napi);
+ if (tx)
+ __napi_schedule(&priv->tx_napi);
+ }
- if (netif_running(ndev)) {
- mtk_star_intr_disable(priv);
- napi_schedule(&priv->napi);
+ /* the interrupt fires once any counter reaches 0x8000000 */
+ if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
}
return IRQ_HANDLED;
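
The rewritten handler acks the status once, masks only the IRQ lines it is about to poll, and defers the work to per-direction NAPI contexts. The key idiom is calling napi_schedule_prep() before masking: it returns true only for the caller that wins the right to schedule, so a context can never be queued twice. A condensed sketch of the pattern; the example_* names and struct example_priv are placeholders, not driver API:

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_priv *priv = netdev_priv((struct net_device *)data);
	u32 sts = example_ack_all(priv);	/* read and clear status */
	bool rx, tx;

	/* napi_schedule_prep() succeeds at most once per poll cycle */
	rx = (sts & EXAMPLE_RX_DONE) && napi_schedule_prep(&priv->rx_napi);
	tx = (sts & EXAMPLE_TX_DONE) && napi_schedule_prep(&priv->tx_napi);

	if (rx || tx) {
		example_mask_irq(priv, rx, tx);	/* re-enabled by the pollers */
		if (rx)
			__napi_schedule(&priv->rx_napi);
		if (tx)
			__napi_schedule(&priv->tx_napi);
	}

	return IRQ_HANDLED;
}
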
@@ -821,32 +883,26 @@ static void mtk_star_phy_config(struct mtk_star_priv *priv)
val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
- /* Only full-duplex supported for now. */
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
-
- regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
-
if (priv->pause) {
- val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
- val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
- val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
} else {
- val = 0;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
}
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
- if (priv->pause) {
- val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
- val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
- } else {
- val = 0;
- }
-
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
@@ -898,14 +954,7 @@ static void mtk_star_init_config(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
MTK_STAR_MSK_MAC_CLK_CONF,
- MTK_STAR_BIT_CLK_DIV_10);
-}
-
-static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
-{
- regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
- MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
- MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
+ priv->compat_data->bit_clk_div);
}
static int mtk_star_enable(struct net_device *ndev)
@@ -951,11 +1000,12 @@ static int mtk_star_enable(struct net_device *ndev)
/* Request the interrupt */
ret = request_irq(ndev->irq, mtk_star_handle_irq,
- IRQF_TRIGGER_FALLING, ndev->name, ndev);
+ IRQF_TRIGGER_NONE, ndev->name, ndev);
if (ret)
goto err_free_skbs;
- napi_enable(&priv->napi);
+ napi_enable(&priv->tx_napi);
+ napi_enable(&priv->rx_napi);
mtk_star_intr_ack_all(priv);
mtk_star_intr_enable(priv);
@@ -988,7 +1038,8 @@ static void mtk_star_disable(struct net_device *ndev)
struct mtk_star_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ napi_disable(&priv->tx_napi);
+ napi_disable(&priv->rx_napi);
mtk_star_intr_disable(priv);
mtk_star_dma_disable(priv);
mtk_star_intr_ack_all(priv);
@@ -1020,13 +1071,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
return phy_mii_ioctl(ndev->phydev, req, cmd);
}
-static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ netif_stop_queue(priv->ndev);
+
+ /* Might race with mtk_star_tx_poll, check again */
+ smp_mb();
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
+ return -EBUSY;
+
+ netif_start_queue(priv->ndev);
+
+ return 0;
+}
+
+static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
+ return 0;
+
+ return __mtk_star_maybe_stop_tx(priv, size);
+}
+
+static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct mtk_star_ring *ring = &priv->tx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc_data desc_data;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ /* This is a hard error, log it. */
+ pr_err_ratelimited("Tx ring full when queue awake\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
if (dma_mapping_error(dev, desc_data.dma_addr))
@@ -1034,17 +1117,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
desc_data.skb = skb;
desc_data.len = skb->len;
-
- spin_lock_bh(&priv->lock);
-
mtk_star_ring_push_head_tx(ring, &desc_data);
netdev_sent_queue(ndev, skb->len);
- if (mtk_star_ring_full(ring))
- netif_stop_queue(ndev);
-
- spin_unlock_bh(&priv->lock);
+ mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
mtk_star_dma_resume_tx(priv);
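
The xmit path now uses the standard stop-then-recheck pattern: stop the queue first, issue smp_mb(), then re-read the ring indices. The barrier orders the queue-stop against the completion path, which wakes the queue only once availability climbs back above MTK_STAR_TX_THRESH (a quarter of the ring) to avoid wake/stop thrashing, so a wakeup cannot be lost between the availability check and the stop. A sketch of the idiom under hypothetical names:

static int example_maybe_stop_tx(struct example_priv *priv, u16 needed)
{
	if (likely(example_tx_ring_avail(&priv->tx_ring) >= needed))
		return 0;

	netif_stop_queue(priv->ndev);

	/* order the stop against the re-read of the ring indices */
	smp_mb();

	/* completion may have freed descriptors meanwhile: check again */
	if (likely(example_tx_ring_avail(&priv->tx_ring) < needed))
		return -EBUSY;

	netif_start_queue(priv->ndev);
	return 0;
}
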
@@ -1076,31 +1153,40 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
return ret;
}
-static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
+ struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
+ tx_napi);
+ int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
struct mtk_star_ring *ring = &priv->tx_ring;
struct net_device *ndev = priv->ndev;
- int ret, pkts_compl, bytes_compl;
- bool wake = false;
-
- spin_lock(&priv->lock);
-
- for (pkts_compl = 0, bytes_compl = 0;;
- pkts_compl++, bytes_compl += ret, wake = true) {
- if (!mtk_star_ring_descs_available(ring))
- break;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
if (ret < 0)
break;
+
+ count++;
+ pkts_compl++;
+ bytes_compl += ret;
+ entry = ring->tail;
}
netdev_completed_queue(ndev, pkts_compl, bytes_compl);
- if (wake && netif_queue_stopped(ndev))
+ if (unlikely(netif_queue_stopped(ndev)) &&
+ (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
netif_wake_queue(ndev);
- spin_unlock(&priv->lock);
+ if (napi_complete(napi)) {
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, false, true);
+ spin_unlock(&priv->lock);
+ }
+
+ return 0;
}
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
@@ -1180,7 +1266,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
struct mtk_star_ring *ring = &priv->rx_ring;
struct device *dev = mtk_star_get_dev(priv);
@@ -1188,107 +1274,85 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
struct net_device *ndev = priv->ndev;
struct sk_buff *curr_skb, *new_skb;
dma_addr_t new_dma_addr;
- int ret;
+ int ret, count = 0;
- spin_lock(&priv->lock);
- ret = mtk_star_ring_pop_tail(ring, &desc_data);
- spin_unlock(&priv->lock);
- if (ret)
- return -1;
+ while (count < budget) {
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return -1;
- curr_skb = desc_data.skb;
+ curr_skb = desc_data.skb;
- if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
- (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
- /* Error packet -> drop and reuse skb. */
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- /* Prepare new skb before receiving the current one. Reuse the current
- * skb if we fail at any point.
- */
- new_skb = mtk_star_alloc_skb(ndev);
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ /* Prepare new skb before receiving the current one.
+ * Reuse the current skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
- if (dma_mapping_error(dev, new_dma_addr)) {
- ndev->stats.rx_dropped++;
- dev_kfree_skb(new_skb);
- new_skb = curr_skb;
- netdev_err(ndev, "DMA mapping error of RX descriptor\n");
- goto push_new_skb;
- }
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
- /* We can't fail anymore at this point: it's safe to unmap the skb. */
- mtk_star_dma_unmap_rx(priv, &desc_data);
+ /* We can't fail anymore at this point:
+ * it's safe to unmap the skb.
+ */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
- skb_put(desc_data.skb, desc_data.len);
- desc_data.skb->ip_summed = CHECKSUM_NONE;
- desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
- desc_data.skb->dev = ndev;
- netif_receive_skb(desc_data.skb);
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
- /* update dma_addr for new skb */
- desc_data.dma_addr = new_dma_addr;
+ /* update dma_addr for new skb */
+ desc_data.dma_addr = new_dma_addr;
push_new_skb:
- desc_data.len = skb_tailroom(new_skb);
- desc_data.skb = new_skb;
- spin_lock(&priv->lock);
- mtk_star_ring_push_head_rx(ring, &desc_data);
- spin_unlock(&priv->lock);
+ count++;
- return 0;
-}
-
-static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
-{
- int received, ret;
-
- for (received = 0, ret = 0; received < budget && ret == 0; received++)
- ret = mtk_star_receive_packet(priv);
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ }
mtk_star_dma_resume_rx(priv);
- return received;
+ return count;
}
-static int mtk_star_poll(struct napi_struct *napi, int budget)
+static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
- unsigned int status;
- int received = 0;
-
- priv = container_of(napi, struct mtk_star_priv, napi);
-
- status = mtk_star_intr_read(priv);
- mtk_star_intr_ack_all(priv);
+ int work_done = 0;
- if (status & MTK_STAR_BIT_INT_STS_TNTC)
- /* Clean-up all TX descriptors. */
- mtk_star_tx_complete_all(priv);
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
- if (status & MTK_STAR_BIT_INT_STS_FNRC)
- /* Receive up to $budget packets. */
- received = mtk_star_process_rx(priv, budget);
-
- if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
- mtk_star_update_stats(priv);
- mtk_star_reset_counters(priv);
+ work_done = mtk_star_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock(&priv->lock);
}
- if (received < budget)
- napi_complete_done(napi, received);
-
- mtk_star_intr_enable(priv);
-
- return received;
+ return work_done;
}
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
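
mtk_star_rx_poll() above follows the canonical NAPI contract: consume at most budget packets, and only when work_done < budget call napi_complete_done() and unmask the RX interrupt; returning the full budget keeps the poller scheduled with the interrupt still masked. The skeleton, reduced to that contract (placeholder names):

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv =
		container_of(napi, struct example_priv, rx_napi);
	int work_done = example_rx(priv, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_rx_irq(priv);	/* irq off until next poll */

	return work_done;
}
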
@@ -1442,6 +1506,25 @@ static void mtk_star_clk_disable_unprepare(void *data)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+static int mtk_star_set_timing(struct mtk_star_priv *priv)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int delay_val = 0;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
+}
+
static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
@@ -1460,6 +1543,7 @@ static int mtk_star_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
+ priv->compat_data = of_device_get_match_data(&pdev->dev);
SET_NETDEV_DEV(ndev, dev);
platform_set_drvdata(pdev, ndev);
@@ -1510,7 +1594,8 @@ static int mtk_star_probe(struct platform_device *pdev)
ret = of_get_phy_mode(of_node, &priv->phy_intf);
if (ret) {
return ret;
- } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
+ priv->phy_intf != PHY_INTERFACE_MODE_MII) {
dev_err(dev, "unsupported phy mode: %s\n",
phy_modes(priv->phy_intf));
return -EINVAL;
@@ -1522,7 +1607,23 @@ static int mtk_star_probe(struct platform_device *pdev)
return -ENODEV;
}
- mtk_star_set_mode_rmii(priv);
+ priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
+ priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
+ priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");
+
+ if (priv->compat_data->set_interface_mode) {
+ ret = priv->compat_data->set_interface_mode(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = mtk_star_set_timing(priv);
+ if (ret) {
+ dev_err(dev, "Failed to set timing, err = %d\n", ret);
+ return -EINVAL;
+ }
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1550,16 +1651,92 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
+ NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
}
#ifdef CONFIG_OF
+static int mt8516_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val, rmii_rxc;
+ int ret;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ rmii_rxc = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG1_CON,
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
+ rmii_rxc);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG0_CON,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
+ intf_val);
+}
+
+static int mt8365_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG_CON_V2,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
+ intf_val);
+}
+
+static const struct mtk_star_compat mtk_star_mt8516_compat = {
+ .set_interface_mode = mt8516_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
+};
+
+static const struct mtk_star_compat mtk_star_mt8365_compat = {
+ .set_interface_mode = mt8365_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
+};
+
static const struct of_device_id mtk_star_of_match[] = {
- { .compatible = "mediatek,mt8516-eth", },
- { .compatible = "mediatek,mt8518-eth", },
- { .compatible = "mediatek,mt8175-eth", },
+ { .compatible = "mediatek,mt8516-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8518-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8175-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8365-eth",
+ .data = &mtk_star_mt8365_compat },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index af3b2b59a2a6..43a4102e9c09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -645,7 +645,7 @@ static int get_real_size(const struct sk_buff *skb,
*inline_ok = false;
*hopbyhop = 0;
if (skb->encapsulation) {
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
/* Detects large IPV6 TCP packets and prepares for removal of
* HBH header that has been pushed by ip6_xmit(),
@@ -653,7 +653,7 @@ static int get_real_size(const struct sk_buff *skb,
*/
if (ipv6_has_hopopt_jumbo(skb))
*hopbyhop = sizeof(struct hop_jumbo_hdr);
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea867a45764..5dadc2fce7ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o lib/tout.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o
#
# Netdev basic
@@ -45,7 +45,8 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o en/tc/int_port.o
+ en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
+ en/tc/post_meter.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
@@ -53,7 +54,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
en/tc/act/mirred.o en/tc/act/mirred_nic.o \
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
- en/tc/act/redirect_ingress.o
+ en/tc/act/redirect_ingress.o en/tc/act/police.o
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 2755c25ba324..305fde62a78d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -30,7 +30,7 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
&mlx5e_tc_act_sample,
- NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_police,
&mlx5e_tc_act_ct,
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
@@ -106,8 +106,8 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
{
memset(parse_state, 0, sizeof(*parse_state));
parse_state->flow = flow;
- parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
+ parse_state->flow_action = flow_action;
}
void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index f34714c5ddd4..095ff8ef80e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -13,7 +13,7 @@
struct mlx5_flow_attr;
struct mlx5e_tc_act_parse_state {
- unsigned int num_actions;
+ struct flow_action *flow_action;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
u32 actions;
@@ -76,6 +76,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+extern struct mlx5e_tc_act mlx5e_tc_act_police;
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
new file mode 100644
index 000000000000..ab32fe6a2e57
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ if (mlx5e_policer_validate(parse_state->flow_action, act,
+ parse_state->extack))
+ return false;
+
+ return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
+}
+
+static int
+tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_flow_meter_params *params;
+
+ params = &attr->meter_attr.params;
+ params->index = act->hw_index;
+ if (act->police.rate_bytes_ps) {
+ params->mode = MLX5_RATE_LIMIT_BPS;
+ /* change rate to bits per second */
+ params->rate = act->police.rate_bytes_ps << 3;
+ params->burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps) {
+ params->mode = MLX5_RATE_LIMIT_PPS;
+ params->rate = act->police.rate_pkt_ps;
+ params->burst = act->police.burst_pkt;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+ attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+
+ return 0;
+}
+
+static bool
+tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return true;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_police = {
+ .can_offload = tc_act_can_offload_police,
+ .parse_action = tc_act_parse_police,
+ .is_multi_table_act = tc_act_is_multi_table_act_police,
+};
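
tc_act_parse_police() normalizes the two tc police flavors into the driver's meter parameters: byte rates are converted to bits per second (<< 3) for MLX5_RATE_LIMIT_BPS, while packet rates and bursts pass through for MLX5_RATE_LIMIT_PPS. A runnable userspace model of just that normalization, with flow_action_entry reduced to the fields the example needs:

#include <stdio.h>
#include <stdint.h>

enum { RATE_LIMIT_BPS, RATE_LIMIT_PPS };

struct police_params { int mode; uint64_t rate, burst; };

static int parse_police(uint64_t rate_bytes_ps, uint32_t burst_bytes,
			uint64_t rate_pkt_ps, uint32_t burst_pkts,
			struct police_params *p)
{
	if (rate_bytes_ps) {
		p->mode = RATE_LIMIT_BPS;
		p->rate = rate_bytes_ps << 3;	/* bytes/s -> bits/s */
		p->burst = burst_bytes;
	} else if (rate_pkt_ps) {
		p->mode = RATE_LIMIT_PPS;
		p->rate = rate_pkt_ps;
		p->burst = burst_pkts;
	} else {
		return -1;	/* -EOPNOTSUPP in the driver */
	}
	return 0;
}

int main(void)
{
	struct police_params p;

	/* tc police rate 1gbit: 125000000 bytes/s -> 10^9 bits/s */
	parse_police(125000000ULL, 128 * 1024, 0, 0, &p);
	printf("mode=%d rate=%llu bit/s burst=%llu bytes\n", p.mode,
	       (unsigned long long)p.rate, (unsigned long long)p.burst);
	return 0;
}
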
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index a7d9eab19e4a..53b270f652b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -12,7 +12,7 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
{
struct netlink_ext_ack *extack = parse_state->extack;
- if (parse_state->num_actions != 1) {
+ if (parse_state->flow_action->num_entries != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
new file mode 100644
index 000000000000..ca33f673396f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/math64.h>
+#include "lib/aso.h"
+#include "en/tc/post_act.h"
+#include "meter.h"
+#include "en/tc_priv.h"
+#include "post_meter.h"
+
+#define MLX5_START_COLOR_SHIFT 28
+#define MLX5_METER_MODE_SHIFT 24
+#define MLX5_CBS_EXP_SHIFT 24
+#define MLX5_CBS_MAN_SHIFT 16
+#define MLX5_CIR_EXP_SHIFT 8
+
+/* cir = 8*(10^9)*cir_mantissa/(2^cir_exponent) bits/s */
+#define MLX5_CONST_CIR 8000000000ULL
+#define MLX5_CALC_CIR(m, e) ((MLX5_CONST_CIR * (m)) >> (e))
+#define MLX5_MAX_CIR ((MLX5_CONST_CIR * 0x100) - 1)
+
+/* cbs = cbs_mantissa*2^cbs_exponent */
+#define MLX5_CALC_CBS(m, e) ((m) << (e))
+#define MLX5_MAX_CBS ((0x100ULL << 0x1F) - 1)
+#define MLX5_MAX_HW_CBS 0x7FFFFFFF
+
+struct mlx5e_flow_meter_aso_obj {
+ struct list_head entry;
+ int base_id;
+ int total_meters;
+
+ unsigned long meters_map[]; /* must be at the end of this struct */
+};
+
+struct mlx5e_flow_meters {
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_aso *aso;
+ struct mutex aso_lock; /* Protects aso operations */
+ int log_granularity;
+ u32 pdn;
+
+ DECLARE_HASHTABLE(hashtbl, 8);
+
+ struct mutex sync_lock; /* protect flow meter operations */
+ struct list_head partial_list;
+ struct list_head full_list;
+
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_post_act *post_act;
+
+ struct mlx5e_post_meter_priv *post_meter;
+};
+
+static void
+mlx5e_flow_meter_cir_calc(u64 cir, u8 *man, u8 *exp)
+{
+ s64 _cir, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cir << e;
+ if ((s64)m < 0) /* overflow */
+ break;
+ m = div64_u64(m, MLX5_CONST_CIR);
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cir = MLX5_CALC_CIR(m, e);
+ _delta = cir - _cir;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
+static void
+mlx5e_flow_meter_cbs_calc(u64 cbs, u8 *man, u8 *exp)
+{
+ s64 _cbs, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cbs >> e;
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cbs = MLX5_CALC_CBS(m, e);
+ _delta = cbs - _cbs;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
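
The two searches above brute-force an 8-bit mantissa / 5-bit exponent encoding: cir = 8*(10^9)*man / 2^exp bit/s and cbs = man << exp, scanning all 32 exponents for the closest fit. For example, 1 Gbit/s encodes exactly as man=1, exp=3 (8*10^9 / 8), and a 128 KiB burst as man=128, exp=10 (128 << 10 == 131072). A standalone model of the CIR search (an abs-delta variant, equivalent here since the floor'ed mantissa never overshoots the target):

#include <stdio.h>
#include <stdint.h>

#define CONST_CIR 8000000000ULL

static void cir_calc(uint64_t cir, uint8_t *man, uint8_t *exp)
{
	uint64_t best = UINT64_MAX;

	*man = *exp = 0;
	for (unsigned int e = 0; e <= 0x1F; e++) {	/* 5-bit exponent */
		uint64_t m = cir << e;

		if ((m >> e) != cir)			/* shifted out: overflow */
			break;
		m /= CONST_CIR;
		if (m > 0xFF)				/* 8-bit mantissa */
			continue;

		uint64_t got = (CONST_CIR * m) >> e;
		uint64_t delta = cir > got ? cir - got : got - cir;

		if (delta < best) {
			*man = m;
			*exp = e;
			if (!delta)
				return;
			best = delta;
		}
	}
}

int main(void)
{
	uint8_t man, exp;

	cir_calc(1000000000ULL, &man, &exp);	/* 1 Gbit/s */
	/* expect man=1 exp=3: 8e9 * 1 / 2^3 == 1e9 exactly */
	printf("1 Gbit/s -> man=%u exp=%u\n", (unsigned)man, (unsigned)exp);
	return 0;
}

In PPS mode the same encoder is fed pre-scaled values, per the "128 bytes per packet" comment in mlx5e_tc_meter_modify() below: rate <<= 10 (1024 bits per packet) and burst <<= 7 (128 bytes per packet).
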
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params)
+{
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl;
+ struct mlx5_wqe_aso_data_seg *aso_data;
+ struct mlx5e_flow_meters *flow_meters;
+ u8 cir_man, cir_exp, cbs_man, cbs_exp;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *aso;
+ u64 rate, burst;
+ u8 ds_cnt;
+ int err;
+
+ rate = meter_params->rate;
+ burst = meter_params->burst;
+
+ /* HW treats each packet as 128 bytes in PPS mode */
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS) {
+ rate <<= 10;
+ burst <<= 7;
+ }
+
+ if (!rate || rate > MLX5_MAX_CIR || !burst || burst > MLX5_MAX_CBS)
+ return -EINVAL;
+
+ /* the HW limits cbs to 31 bits in total */
+ if (burst > MLX5_MAX_HW_CBS) {
+ mlx5_core_warn(mdev,
+ "burst(%lld) is too large, use HW allowed value(%d)\n",
+ burst, MLX5_MAX_HW_CBS);
+ burst = MLX5_MAX_HW_CBS;
+ }
+
+ mlx5_core_dbg(mdev, "meter mode=%d\n", meter_params->mode);
+ mlx5e_flow_meter_cir_calc(rate, &cir_man, &cir_exp);
+ mlx5_core_dbg(mdev, "rate=%lld, cir=%lld, exp=%d, man=%d\n",
+ rate, MLX5_CALC_CIR(cir_man, cir_exp), cir_exp, cir_man);
+ mlx5e_flow_meter_cbs_calc(burst, &cbs_man, &cbs_exp);
+ mlx5_core_dbg(mdev, "burst=%lld, cbs=%lld, exp=%d, man=%d\n",
+ burst, MLX5_CALC_CBS((u64)cbs_man, cbs_exp), cbs_exp, cbs_man);
+
+ if (!cir_man || !cbs_man)
+ return -EINVAL;
+
+ flow_meters = meter->flow_meters;
+ aso = flow_meters->aso;
+
+ mutex_lock(&flow_meters->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(aso);
+ ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_DS);
+ mlx5_aso_build_wqe(aso, ds_cnt, aso_wqe, meter->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
+
+ aso_ctrl = &aso_wqe->aso_ctrl;
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
+ aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
+ MLX5_ASO_ALWAYS_TRUE << 4;
+ aso_ctrl->data_offset_condition_operand = MLX5_ASO_LOGICAL_OR << 6;
+ aso_ctrl->data_mask = cpu_to_be64(0x80FFFFFFULL << (meter->idx ? 0 : 32));
+
+ aso_data = (struct mlx5_wqe_aso_data_seg *)(aso_wqe + 1);
+ memset(aso_data, 0, sizeof(*aso_data));
+ aso_data->bytewise_data[meter->idx * 8] = cpu_to_be32((0x1 << 31) | /* valid */
+ (MLX5_FLOW_METER_COLOR_GREEN << MLX5_START_COLOR_SHIFT));
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS)
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_NUM_PACKETS << MLX5_METER_MODE_SHIFT);
+ else
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH << MLX5_METER_MODE_SHIFT);
+
+ aso_data->bytewise_data[meter->idx * 8 + 2] = cpu_to_be32((cbs_exp << MLX5_CBS_EXP_SHIFT) |
+ (cbs_man << MLX5_CBS_MAN_SHIFT) |
+ (cir_exp << MLX5_CIR_EXP_SHIFT) |
+ cir_man);
+
+ mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
+
+ /* With newer FW, the wait for the first ASO WQE can exceed 2us, so allow up to 10ms. */
+ err = mlx5_aso_poll_cq(aso, true, 10);
+ mutex_unlock(&flow_meters->aso_lock);
+
+ return err;
+}
+
+static int
+mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ void *obj;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+
+ obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
+ MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err) {
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) created\n", *obj_id);
+ }
+
+ return err;
+}
+
+static void
+mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) destroyed\n", obj_id);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+{
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ struct mlx5e_flow_meter_handle *meter;
+ int err, pos, total;
+ u32 id;
+
+ meter = kzalloc(sizeof(*meter), GFP_KERNEL);
+ if (!meter)
+ return ERR_PTR(-ENOMEM);
+
+ meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
+ struct mlx5e_flow_meter_aso_obj,
+ entry);
+ /* 2 meters in one object */
+ total = 1 << (flow_meters->log_granularity + 1);
+ if (!meters_obj) {
+ err = mlx5e_flow_meter_create_aso_obj(flow_meters, &id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create flow meter ASO object\n");
+ goto err_create;
+ }
+
+ meters_obj = kzalloc(sizeof(*meters_obj) + BITS_TO_BYTES(total),
+ GFP_KERNEL);
+ if (!meters_obj) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ meters_obj->base_id = id;
+ meters_obj->total_meters = total;
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ pos = 0;
+ } else {
+ pos = find_first_zero_bit(meters_obj->meters_map, total);
+ if (bitmap_weight(meters_obj->meters_map, total) == total - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->full_list);
+ }
+ }
+
+ bitmap_set(meters_obj->meters_map, pos, 1);
+ meter->flow_meters = flow_meters;
+ meter->meters_obj = meters_obj;
+ meter->obj_id = meters_obj->base_id + pos / 2;
+ meter->idx = pos % 2;
+
+ mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+
+ return meter;
+
+err_mem:
+ mlx5e_flow_meter_destroy_aso_obj(mdev, id);
+err_create:
+ kfree(meter);
+ return ERR_PTR(err);
+}
+
+static void
+__mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ int n, pos;
+
+ meters_obj = meter->meters_obj;
+ pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
+ bitmap_clear(meters_obj->meters_map, pos, 1);
+ n = bitmap_weight(meters_obj->meters_map, meters_obj->total_meters);
+ if (n == 0) {
+ list_del(&meters_obj->entry);
+ mlx5e_flow_meter_destroy_aso_obj(mdev, meters_obj->base_id);
+ kfree(meters_obj);
+ } else if (n == meters_obj->total_meters - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ }
+
+ mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+ kfree(meter);
+}
+
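
Meter addressing follows from the packing above: one allocation spans 2^log_granularity ASO objects and every object carries two meters, hence total = 1 << (log_granularity + 1) bitmap slots, obj_id = base_id + pos/2, and idx = pos%2 selecting which half of the 64-byte ASO data a meter owns (the 0x80FFFFFF data_mask shifted by 32 or 0 in mlx5e_tc_meter_modify() above). A runnable model; base_id is a hypothetical FW-assigned object id:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int log_granularity = 6;		 /* capped value from the patch */
	int total = 1 << (log_granularity + 1);	 /* bitmap slots = meters */
	int base_id = 0x1000;			 /* hypothetical FW object id */

	assert(total == 128);

	for (int pos = 0; pos < 4; pos++)
		printf("pos=%d -> obj_id=0x%x idx=%d\n",
		       pos, (unsigned)(base_id + pos / 2), pos % 2);
	/* pos 0,1 share object 0x1000 (idx 0/1); pos 2,3 -> 0x1001 ... */
	return 0;
}
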
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ hash_for_each_possible(flow_meters->hashtbl, meter, hlist, params->index)
+ if (meter->params.index == params->index)
+ goto add_ref;
+
+ meter = __mlx5e_flow_meter_alloc(flow_meters);
+ if (IS_ERR(meter)) {
+ err = PTR_ERR(meter);
+ goto err_alloc;
+ }
+
+ hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
+ meter->params.index = params->index;
+
+add_ref:
+ meter->refcnt++;
+
+ if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
+ meter->params.burst != params->burst) {
+ err = mlx5e_tc_meter_modify(mdev, meter, params);
+ if (err)
+ goto err_update;
+
+ meter->params.mode = params->mode;
+ meter->params.rate = params->rate;
+ meter->params.burst = params->burst;
+ }
+
+ mutex_unlock(&flow_meters->sync_lock);
+ return meter;
+
+err_update:
+ if (--meter->refcnt == 0) {
+ hash_del(&meter->hlist);
+ __mlx5e_flow_meter_free(meter);
+ }
+err_alloc:
+ mutex_unlock(&flow_meters->sync_lock);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+
+ mutex_lock(&flow_meters->sync_lock);
+ if (--meter->refcnt == 0) {
+ hash_del(&meter->hlist);
+ __mlx5e_flow_meter_free(meter);
+ }
+ mutex_unlock(&flow_meters->sync_lock);
+}
+
+struct mlx5_flow_table *
+mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters)
+{
+ return mlx5e_post_meter_get_ft(flow_meters->post_meter);
+}
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (IS_ERR_OR_NULL(post_act)) {
+ netdev_dbg(priv->netdev,
+ "flow meter offload is not supported, post action is missing\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ flow_meters = kzalloc(sizeof(*flow_meters), GFP_KERNEL);
+ if (!flow_meters)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_core_alloc_pd(mdev, &flow_meters->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc pd for flow meter aso, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_meters->aso = mlx5_aso_create(mdev, flow_meters->pdn);
+ if (IS_ERR(flow_meters->aso)) {
+ mlx5_core_warn(mdev, "Failed to create aso wqe for flow meter\n");
+ err = PTR_ERR(flow_meters->aso);
+ goto err_sq;
+ }
+
+ flow_meters->post_meter = mlx5e_post_meter_init(priv, ns_type, post_act);
+ if (IS_ERR(flow_meters->post_meter)) {
+ err = PTR_ERR(flow_meters->post_meter);
+ goto err_post_meter;
+ }
+
+ mutex_init(&flow_meters->sync_lock);
+ INIT_LIST_HEAD(&flow_meters->partial_list);
+ INIT_LIST_HEAD(&flow_meters->full_list);
+
+ flow_meters->ns_type = ns_type;
+ flow_meters->mdev = mdev;
+ flow_meters->post_act = post_act;
+ mutex_init(&flow_meters->aso_lock);
+ flow_meters->log_granularity = min_t(int, 6,
+ MLX5_CAP_QOS(mdev, log_meter_aso_max_alloc));
+
+ return flow_meters;
+
+err_post_meter:
+ mlx5_aso_destroy(flow_meters->aso);
+err_sq:
+ mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
+err_out:
+ kfree(flow_meters);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
+{
+ if (IS_ERR_OR_NULL(flow_meters))
+ return;
+
+ mlx5e_post_meter_cleanup(flow_meters->post_meter);
+ mlx5_aso_destroy(flow_meters->aso);
+ mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
+
+ kfree(flow_meters);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
new file mode 100644
index 000000000000..78885db5dc7d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_FLOW_METER_H__
+#define __MLX5_EN_FLOW_METER_H__
+
+struct mlx5e_flow_meter_aso_obj;
+struct mlx5e_flow_meters;
+struct mlx5_flow_attr;
+
+enum mlx5e_flow_meter_mode {
+ MLX5_RATE_LIMIT_BPS,
+ MLX5_RATE_LIMIT_PPS,
+};
+
+struct mlx5e_flow_meter_params {
+ enum mlx5e_flow_meter_mode mode;
+ /* police action index */
+ u32 index;
+ u64 rate;
+ u64 burst;
+};
+
+struct mlx5e_flow_meter_handle {
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ u32 obj_id;
+ u8 idx;
+
+ int refcnt;
+ struct hlist_node hlist;
+ struct mlx5e_flow_meter_params params;
+};
+
+struct mlx5e_meter_attr {
+ struct mlx5e_flow_meter_params params;
+ struct mlx5e_flow_meter_handle *meter;
+};
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params);
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter);
+
+struct mlx5_flow_table *
+mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters);
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_action);
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters);
+
+#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index dea137dd744b..2093cc2b0d48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -22,9 +22,9 @@ struct mlx5e_post_act_handle {
u32 id;
};
-#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
-#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0)
-#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX
+#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK
struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
new file mode 100644
index 000000000000..efa20356764e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "en/tc_priv.h"
+#include "post_meter.h"
+#include "en/tc/post_act.h"
+
+#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)
+#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)
+
+struct mlx5e_post_meter_priv {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *fwd_green_rule;
+ struct mlx5_flow_handle *drop_red_rule;
+};
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->ft;
+}
+
+static int
+mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+
+ root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type);
+ if (!root_ns) {
+ mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(post_meter->ft)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
+ return PTR_ERR(post_meter->ft);
+ }
+
+ return 0;
+}
+
+static int
+mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *misc2, *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc2 = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_5, MLX5_PACKET_COLOR_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in);
+ if (IS_ERR(post_meter->fg)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n");
+ err = PTR_ERR(post_meter->fg);
+ }
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static int
+mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5e_post_act *post_act)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
+ err = PTR_ERR(rule);
+ goto err_red;
+ }
+ post_meter->drop_red_rule = rule;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = mlx5e_tc_post_act_get_ft(post_act);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
+ err = PTR_ERR(rule);
+ goto err_green;
+ }
+ post_meter->fwd_green_rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_green:
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+err_red:
+ kvfree(spec);
+ return err;
+}
+
+static void
+mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+ mlx5_del_flow_rules(post_meter->fwd_green_rule);
+}
+
+static void
+mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_group(post_meter->fg);
+}
+
+static void
+mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_table(post_meter->ft);
+}
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
+{
+ struct mlx5e_post_meter_priv *post_meter;
+ int err;
+
+ post_meter = kzalloc(sizeof(*post_meter), GFP_KERNEL);
+ if (!post_meter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_post_meter_table_create(priv, ns_type, post_meter);
+ if (err)
+ goto err_ft;
+
+ err = mlx5e_post_meter_fg_create(priv, post_meter);
+ if (err)
+ goto err_fg;
+
+ err = mlx5e_post_meter_rules_create(priv, post_meter, post_act);
+ if (err)
+ goto err_rules;
+
+ return post_meter;
+
+err_rules:
+ mlx5e_post_meter_fg_destroy(post_meter);
+err_fg:
+ mlx5e_post_meter_table_destroy(post_meter);
+err_ft:
+ kfree(post_meter);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_rules_destroy(post_meter);
+ mlx5e_post_meter_fg_destroy(post_meter);
+ mlx5e_post_meter_table_destroy(post_meter);
+ kfree(post_meter);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
new file mode 100644
index 000000000000..c74f3cbd810d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_POST_METER_H__
+#define __MLX5_EN_POST_METER_H__
+
+#define packet_color_to_reg { \
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5, \
+ .moffset = 0, \
+ .mlen = 8, \
+ .soffset = MLX5_BYTE_OFF(fte_match_param, \
+ misc_parameters_2.metadata_reg_c_5), \
+}
+
+struct mlx5e_post_meter_priv;
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act);
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
+
+#endif /* __MLX5_EN_POST_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 25f51f80a9b4..af959fadbecf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -36,8 +36,8 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
-#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
+#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
/* Statically allocate modify actions for
* ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 00a3ba862afb..5bbd6b92840f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -62,10 +62,11 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_4),\
}
+/* The 8 LSBs of metadata register C5 are reserved for packet color. */
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
- .moffset = 0,\
- .mlen = 32,\
+ .moffset = 8,\
+ .mlen = 24,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -84,10 +85,8 @@ struct mlx5_ct_attr {
.mlen = ESW_ZONE_ID_BITS,\
}
-#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
-#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
+#define MLX5_CT_ZONE_BITS MLX5_REG_MAPPING_MBITS(ZONE_TO_REG)
+#define MLX5_CT_ZONE_MASK MLX5_REG_MAPPING_MASK(ZONE_TO_REG)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
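With the narrowed fteid_to_reg_ct mapping above, metadata register C5 is shared by two users; the assumed bit layout (LSB = bit 0) is:

	bits  0..7  - packet color (packet_color_to_reg: moffset 0, mlen 8)
	bits  8..31 - FTE id       (fteid_to_reg_ct:     moffset 8, mlen 24)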
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 3b74a6fd5c43..d2bdfd6872bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -203,7 +203,13 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);
+
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
deleted file mode 100644
index e4eeb2ba21c7..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_STEERING_H__
-#define __MLX5_IPSEC_STEERING_H__
-
-#include "en.h"
-#include "ipsec.h"
-#include "ipsec_offload.h"
-#include "en/fs.h"
-
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule);
-#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 4b6f0d1ea59a..cc5cb3010e64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -458,7 +458,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
int datalen;
u32 seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return true;
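For context, the helpers substituted here and in the en_tx.c hunk further down are expected to reduce to the open-coded expressions they replace; a sketch of the presumed definitions (the canonical ones live in include/linux/tcp.h and may differ):

	static inline int skb_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}

	static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
	}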
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index adf5cc6a7b8c..dec183ccd4ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -62,6 +62,7 @@ struct mlx5_tc_int_port_priv;
struct mlx5e_rep_bond;
struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
+struct mlx5e_flow_meters;
struct mlx5_rep_uplink_priv {
/* indirect block callbacks are invoked on bind/unbind events
@@ -97,6 +98,8 @@ struct mlx5_rep_uplink_priv {
/* OVS internal port support */
struct mlx5e_tc_int_port_priv *int_port_priv;
+
+ struct mlx5e_flow_meters *flow_meters;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 34bf11cdf90f..5e70e99aa1f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -59,6 +59,7 @@
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
+#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -104,6 +105,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
.mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
+ [PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
/* To avoid false lock dependency warning set the tc_ht lock
@@ -240,6 +242,30 @@ mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
return NULL;
}
+struct mlx5e_flow_meters *
+mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_priv *priv;
+
+ if (is_mdev_switchdev_mode(dev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ priv = netdev_priv(uplink_rpriv->netdev);
+ if (!uplink_priv->flow_meters)
+ uplink_priv->flow_meters =
+ mlx5e_flow_meters_init(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ uplink_priv->post_act);
+ if (!IS_ERR(uplink_priv->flow_meters))
+ return uplink_priv->flow_meters;
+ }
+
+ return NULL;
+}
+
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
@@ -319,12 +345,39 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+static bool
+is_flow_meter_action(struct mlx5_flow_attr *attr)
+{
+ return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
+}
+
+static int
+mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = mlx5e_tc_meter_get(priv->mdev, &attr->meter_attr.params);
+ if (IS_ERR(meter)) {
+ mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
+ return PTR_ERR(meter);
+ }
+
+ attr->meter_attr.meter = meter;
+ attr->dest_ft = mlx5e_tc_meter_get_post_meter_ft(meter->flow_meters);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+}
+
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
if (attr->flags & MLX5_ATTR_FLAG_CT) {
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
@@ -341,6 +394,12 @@ mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
+ if (is_flow_meter_action(attr)) {
+ err = mlx5e_tc_add_flow_meter(priv, attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -367,6 +426,9 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
}
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ if (attr->meter_attr.meter)
+ mlx5e_tc_meter_put(attr->meter_attr.meter);
}
int
@@ -4519,9 +4581,9 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
-static int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
@@ -4529,13 +4591,6 @@ static int mlx5e_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
- act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
- NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not pipe or ok");
- return -EOPNOTSUPP;
- }
-
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, act)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -4586,6 +4641,12 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue");
+ return -EOPNOTSUPP;
+ }
+
err = mlx5e_policer_validate(flow_action, act, extack);
if (err)
return err;
@@ -4956,6 +5017,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
+ mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
@@ -5061,7 +5123,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index e2a1250aeca1..517f2252b5ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -39,6 +39,7 @@
#include "en/tc_ct.h"
#include "en/tc_tun.h"
#include "en/tc/int_port.h"
+#include "en/tc/meter.h"
#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -71,6 +72,7 @@ struct mlx5_flow_attr {
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_ct_attr ct_attr;
struct mlx5e_sample_attr sample_attr;
+ struct mlx5e_meter_attr meter_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
@@ -83,6 +85,7 @@ struct mlx5_flow_attr {
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
+ u32 exe_aso_type;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
struct {
@@ -229,6 +232,7 @@ enum mlx5e_tc_attr_to_reg {
FTEID_TO_REG,
NIC_CHAIN_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
+ PACKET_COLOR_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -241,6 +245,10 @@ struct mlx5e_tc_attr_to_reg_mapping {
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
+#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
+#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
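As a worked example of the new accessors, expanding them for the packet-color mapping added in this series (moffset 0, mlen 8; illustrative only):

	MLX5_REG_MAPPING_MOFFSET(PACKET_COLOR_TO_REG) /* -> 0 */
	MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)   /* -> 8 */
	MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)    /* -> GENMASK(7, 0) == 0xff */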
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 50d14cec4894..64d78fd99c6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,14 +152,14 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ihs = skb_tcp_all_headers(skb);
if (ipv6_has_hopopt_jumbo(skb)) {
*hopbyhop = sizeof(struct hop_jumbo_hdr);
ihs -= sizeof(struct hop_jumbo_hdr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 719ef26d23c0..b938632f89ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1152,8 +1152,6 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
const u32 *out;
- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
if (num_vfs < 0)
return;
@@ -1186,6 +1184,9 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
int total_vports;
int err;
+ if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
+ return 0;
+
total_vports = mlx5_eswitch_get_total_vports(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
@@ -1203,6 +1204,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
} else {
esw_warn(dev, "ingress ACL is not supported by FW\n");
}
+ esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
return 0;
err:
@@ -1215,6 +1217,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
+ esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
mlx5_fs_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
@@ -1224,7 +1227,6 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
- * @mode: Eswitch mode to enable
* @num_vfs: Enable eswitch for given number of VFs. This is optional.
* Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
* Caller should pass num_vfs > 0 when enabling eswitch for
@@ -1238,7 +1240,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
* mode. If num_vfs >= 0 is provided, it sets up the VF-related eswitch vports.
* It returns 0 on success or error code on failure.
*/
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
@@ -1257,9 +1259,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- esw->mode = mode;
-
- if (mode == MLX5_ESWITCH_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
mlx5_rescan_drivers(esw->dev);
@@ -1269,22 +1269,19 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (err)
goto abort;
+ esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
- mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mlx5_esw_mode_change_notify(esw, mode);
+ mlx5_esw_mode_change_notify(esw, esw->mode);
return 0;
abort:
- esw->mode = MLX5_ESWITCH_NONE;
-
- if (mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
-
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1305,14 +1302,14 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!mlx5_esw_allowed(esw))
return 0;
- toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+ toggle_lag = !mlx5_esw_is_fdb_created(esw);
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- if (esw->mode == MLX5_ESWITCH_NONE) {
- ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+ if (!mlx5_esw_is_fdb_created(esw)) {
+ ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
enum mlx5_eswitch_vport_event vport_events;
@@ -1330,55 +1327,79 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
return ret;
}
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+/* When disabling SR-IOV, free the driver-level resources. */
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
- int old_mode;
-
- lockdep_assert_held_write(&esw->mode_lock);
-
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_allowed(esw))
return;
- esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ down_write(&esw->mode_lock);
+ /* When the driver is unloaded, this function is called twice, by
+ * remove_one() and by mlx5_unload(). Prevent the second call.
+ */
+ if (!esw->esw_funcs.num_vfs && !clear_vf)
+ goto unlock;
+
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- /* Notify eswitch users that it is exiting from current mode.
- * So that it can do necessary cleanup before the eswitch is disabled.
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ /* When disabling SR-IOV in switchdev mode, free the meta rules here,
+ * because they depend on num_vfs.
+ */
- mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
+ struct devlink *devlink = priv_to_devlink(esw->dev);
- mlx5_eswitch_event_handlers_unregister(esw);
+ esw_offloads_del_send_to_vport_meta_rules(esw);
+ devlink_rate_nodes_destroy(devlink);
+ }
- if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_legacy_disable(esw);
- else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_disable(esw);
+ esw->esw_funcs.num_vfs = 0;
- old_mode = esw->mode;
- esw->mode = MLX5_ESWITCH_NONE;
+unlock:
+ up_write(&esw->mode_lock);
+}
- if (old_mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
+/* Free the resources of the current eswitch mode. Called by devlink when
+ * changing the eswitch mode, and on driver unload.
+ */
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
+{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+
+ /* Notify eswitch users that the eswitch is exiting its current mode,
+ * so that they can do any necessary cleanup before it is disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
- devlink_rate_nodes_destroy(devlink);
+ mlx5_eswitch_event_handlers_unregister(esw);
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
+
+ esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+ else if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_legacy_disable(esw);
mlx5_esw_acls_ns_cleanup(esw);
- if (clear_vf)
- mlx5_eswitch_clear_vf_vports_info(esw);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ devlink_rate_nodes_destroy(devlink);
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_allowed(esw))
return;
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- mlx5_eswitch_disable_locked(esw, clear_vf);
- esw->esw_funcs.num_vfs = 0;
+ mlx5_eswitch_disable_locked(esw);
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@@ -1573,7 +1594,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_NONE;
+ esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
@@ -1875,7 +1896,7 @@ u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+ return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -1995,8 +2016,6 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
*/
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
- if (!mlx5_esw_allowed(esw))
- return;
up_write(&esw->mode_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2754a732914d..c19604b06a2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -282,10 +282,15 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
+ MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};
struct mlx5_esw_bridge_offloads;
+enum {
+ MLX5_ESW_FDB_CREATED = BIT(0),
+};
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -337,6 +342,7 @@ void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
@@ -350,10 +356,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -575,6 +582,11 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
return dl_port_index & 0xffff;
}
+static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
+}
+
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
@@ -719,7 +731,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
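Since MLX5_ESWITCH_NONE is gone, callers are expected to gate on FDB creation instead of the mode; a hedged sketch of the resulting pattern (mirrors mlx5_eswitch_enable() in the eswitch.c hunk above):

	down_write(&esw->mode_lock);
	if (!mlx5_esw_is_fdb_created(esw))
		err = mlx5_eswitch_enable_locked(esw, num_vfs);
	up_write(&esw->mode_lock);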
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2ce3728576d1..e224ec7005a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1040,6 +1040,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
+ /* When changing the eswitch mode from switchdev to legacy with num_vfs != 0,
+ * the meta rules could otherwise be freed twice; set the pointer to NULL.
+ */
+ esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
+}
+
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
}
static int
@@ -2034,7 +2043,7 @@ static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
@@ -2170,18 +2179,18 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- esw->dev->priv.sriov.num_vfs);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
}
+ mlx5_rescan_drivers(esw->dev);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -2894,7 +2903,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
int err = 0;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_NONE) {
+ if (mlx5_esw_is_fdb_created(esw)) {
err = -EBUSY;
goto done;
}
@@ -3229,13 +3238,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -3334,15 +3342,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
-{
- /* devlink commands in NONE eswitch mode are currently supported only
- * on ECPF.
- */
- return (esw->mode == MLX5_ESWITCH_NONE &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
-}
-
/* FIXME: devl_unlock() followed by devl_lock() inside driver callback
* is never correct and prone to races. It's a transitional workaround,
* never repeat this pattern.
@@ -3399,6 +3398,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
+ mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -3409,6 +3409,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
+ mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
@@ -3431,12 +3432,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return PTR_ERR(esw);
mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
err = esw_mode_to_devlink(esw->mode, mode);
-unlock:
mlx5_eswtich_mode_callback_exit(devlink, esw);
return err;
}
@@ -3485,9 +3481,6 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
return PTR_ERR(esw);
mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto out;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -3539,12 +3532,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return PTR_ERR(esw);
mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-unlock:
mlx5_eswtich_mode_callback_exit(devlink, esw);
return err;
}
@@ -3555,16 +3543,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw;
- int err;
+ int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
@@ -3615,21 +3600,15 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
*encap = esw->offloads.encap;
-unlock:
mlx5_eswtich_mode_callback_exit(devlink, esw);
- return err;
+ return 0;
}
static bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 2a8fc547eb37..641505d2c0c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -632,6 +632,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
+ struct mlx5_core_dev *dev;
u8 mode;
#endif
int i;
@@ -641,11 +642,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev);
-
- if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS)
+ dev = ldev->pf[MLX5_LAG_P1].dev;
+ if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
return false;
+ mode = mlx5_eswitch_mode(dev);
for (i = 0; i < ldev->ports; i++)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
@@ -760,8 +761,7 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
#ifdef CONFIG_MLX5_ESWITCH
for (i = 0; i < ldev->ports; i++)
- roce_lag = roce_lag &&
- ldev->pf[i].dev->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+ roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
return roce_lag;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
new file mode 100644
index 000000000000..21e14507ff5c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/transobj.h>
+#include "aso.h"
+#include "wq.h"
+
+struct mlx5_aso_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5_aso {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5_aso_cq cq;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+} ____cacheline_aligned_in_smp;
+
+static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
+ void *cqc_data, struct mlx5_aso_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_vector2eqn(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
+ struct mlx5_aso_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, log_cq_size, 1);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = create_aso_cq(cq, cqc_data);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
+ goto err_free_cq;
+ }
+
+ kvfree(cqc_data);
+ return 0;
+
+err_free_cq:
+ mlx5_aso_free_cq(cq);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ return 0;
+}
+
+static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ int err;
+
+ err = create_aso_sq(mdev, pdn, sqc_data, sq);
+ if (err)
+ return err;
+
+ err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+
+ return err;
+}
+
+static void mlx5_aso_free_sq(struct mlx5_aso *sq)
+{
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_aso_free_sq(sq);
+}
+
+static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
+ u32 pdn, struct mlx5_aso *sq)
+{
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, pdn);
+ MLX5_SET(wq, wq, log_wq_sz, 1);
+
+ err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
+ goto err_free_asosq;
+ }
+
+ mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_free_asosq:
+ mlx5_aso_free_sq(sq);
+err_out:
+ kvfree(sqc_data);
+ return err;
+}
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ struct mlx5_aso *aso;
+ int err;
+
+ aso = kzalloc(sizeof(*aso), GFP_KERNEL);
+ if (!aso)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
+ if (err)
+ goto err_cq;
+
+ err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
+ if (err)
+ goto err_sq;
+
+ return aso;
+
+err_sq:
+ mlx5_aso_destroy_cq(&aso->cq);
+err_cq:
+ kfree(aso);
+ return ERR_PTR(err);
+}
+
+void mlx5_aso_destroy(struct mlx5_aso *aso)
+{
+ if (IS_ERR_OR_NULL(aso))
+ return;
+
+ mlx5_aso_destroy_sq(aso);
+ mlx5_aso_destroy_cq(&aso->cq);
+ kfree(aso);
+}
+
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
+ (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_ACCESS_ASO);
+ cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->general_id = cpu_to_be32(obj_id);
+}
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+{
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
+ return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+}
+
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg)
+{
+ doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ if (with_data)
+ aso->pc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->pc += MLX5_ASO_WQEBBS;
+ *aso->wq.db = cpu_to_be32(aso->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
+{
+ struct mlx5_aso_cq *cq = &aso->cq;
+ struct mlx5_cqe64 *cqe;
+ unsigned long expires;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+
+ expires = jiffies + msecs_to_jiffies(interval_ms);
+ while (!cqe && time_is_after_jiffies(expires)) {
+ usleep_range(2, 10);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ }
+
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* aso->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a CQ overrun may occur.
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe;
+
+ mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
+ err_cqe = (struct mlx5_err_cqe *)cqe;
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
+ err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n",
+ err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (with_data)
+ aso->cc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->cc += MLX5_ASO_WQEBBS;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
new file mode 100644
index 000000000000..b3bbf284fe71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_ASO_H__
+#define __MLX5_LIB_ASO_H__
+
+#include <linux/mlx5/qp.h>
+#include "mlx5_core.h"
+
+#define MLX5_ASO_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
+#define MLX5_ASO_WQEBBS_DATA \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+
+struct mlx5_wqe_aso_ctrl_seg {
+ __be32 va_h;
+ __be32 va_l; /* include read_enable */
+ __be32 l_key;
+ u8 data_mask_mode;
+ u8 condition_1_0_operand;
+ u8 condition_1_0_offset;
+ u8 data_offset_condition_operand;
+ __be32 condition_0_data;
+ __be32 condition_0_mask;
+ __be32 condition_1_data;
+ __be32 condition_1_mask;
+ __be64 bitwise_data;
+ __be64 data_mask;
+};
+
+struct mlx5_wqe_aso_data_seg {
+ __be32 bytewise_data[16];
+};
+
+struct mlx5_aso_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+};
+
+struct mlx5_aso_wqe_data {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+ struct mlx5_wqe_aso_data_seg aso_data;
+};
+
+enum {
+ MLX5_ASO_LOGICAL_AND,
+ MLX5_ASO_LOGICAL_OR,
+};
+
+enum {
+ MLX5_ASO_ALWAYS_FALSE,
+ MLX5_ASO_ALWAYS_TRUE,
+ MLX5_ASO_EQUAL,
+ MLX5_ASO_NOT_EQUAL,
+ MLX5_ASO_GREATER_OR_EQUAL,
+ MLX5_ASO_LESSER_OR_EQUAL,
+ MLX5_ASO_LESSER,
+ MLX5_ASO_GREATER,
+ MLX5_ASO_CYCLIC_GREATER,
+ MLX5_ASO_CYCLIC_LESSER,
+};
+
+enum {
+ MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
+ MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
+ MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
+};
+
+enum {
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+};
+
+struct mlx5_aso;
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode);
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
+void mlx5_aso_destroy(struct mlx5_aso *aso);
+#endif /* __MLX5_LIB_ASO_H__ */
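Taken together, the API above implies a post-then-poll submission flow; a hedged sketch using only calls declared in this header (obj_id, pdn and the 10 ms timeout are placeholders, error handling is trimmed):

	struct mlx5_aso_wqe *wqe;
	struct mlx5_aso *aso;
	u8 ds_cnt;
	int err;

	aso = mlx5_aso_create(mdev, pdn);
	if (IS_ERR(aso))
		return PTR_ERR(aso);

	wqe = mlx5_aso_get_wqe(aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
	/* ... fill wqe->aso_ctrl for the object being accessed ... */
	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
	err = mlx5_aso_poll_cq(aso, false, 10);

	mlx5_aso_destroy(aso);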
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2078d9f03a5f..a9e51c1b7738 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1250,6 +1250,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3be659cd91f1..7d955a4d9f14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -501,7 +501,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
case MLX5_ESWITCH_OFFLOADS:
mlx5_sf_table_enable(table);
break;
- case MLX5_ESWITCH_NONE:
+ case MLX5_ESWITCH_LEGACY:
mlx5_sf_table_disable(table);
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2935614f6fa9..5757cd6e1819 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -145,8 +145,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
sriov->vfs_ctx[vf].enabled = 0;
}
- if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index c57e293cca25..c2d6d64ffe4b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -28,7 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
- spectrum_ethtool.o spectrum_policer.o
+ spectrum_ethtool.o spectrum_policer.o \
+ spectrum_pgt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 91f68fb0b420..666d6b6e4dbf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -633,6 +633,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_ubridge
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
+
/* cmd_mbox_config_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
@@ -713,16 +719,25 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+enum mlxsw_cmd_mbox_config_profile_flood_mode {
+ /* Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset
+ * tables. max_fid_flood_tables indicates the number of per-FID tables.
+ * Reserved when unified bridge model is used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED = 3,
+ /* Controlled flood tables. Reserved when legacy bridge model is
+ * used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+};
+
/* cmd_mbox_config_profile_flood_mode
* Flooding mode to use.
- * 0-2 - Backward compatible modes for SwitchX devices.
- * 3 - Mixed mode, where:
- * max_flood_tables indicates the number of single-entry tables.
- * max_vid_flood_tables indicates the number of per-VID tables.
- * max_fid_offset_flood_tables indicates the number of FID-offset tables.
- * max_fid_flood_tables indicates the number of per-FID tables.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 3);
/* cmd_mbox_config_profile_max_fid_offset_flood_tables
* Maximum number of FID-offset flooding tables.
@@ -783,6 +798,13 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+/* cmd_mbox_config_profile_ubridge
+ * Unified bridge:
+ * 0 - non-unified (legacy) bridge model
+ * 1 - unified bridge model
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
+
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
* Valid for Spectrum only
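A hedged example of a config profile opting into the unified bridge model with the fields added above (used_ubridge and ubridge come from the core.h hunk that follows; all other profile fields are omitted):

	static const struct mlxsw_config_profile example_profile = {
		.used_ubridge	= 1,
		.ubridge	= 1,
	};

With ubridge set, MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED becomes the selectable flood mode, while the mixed mode is reserved.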
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index d1e8b8b8d0c1..a3491ef2aa7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -295,6 +295,7 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
+ used_ubridge:1,
used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_mid;
@@ -314,6 +315,7 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u8 ubridge;
u32 kvd_linear_size;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index fa33caecc91d..636db9a87457 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1164,7 +1164,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
* trap control. In addition, the Trap / Discard action enables activating
* SPAN (port mirroring).
*
- * The Trap with userdef action action has the same functionality as
+ * The Trap with userdef action has the same functionality as
* the Trap action with addition of user defined value that can be set
* and used by higher layer applications.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 34bec9cd572c..0107cbc32fc7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -180,7 +180,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
} else {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 0 and I2C high address is used. Please refer
- * refer to "Memory Organization" figure in SFF-8472
+ * to "Memory Organization" figure in SFF-8472
* specification for graphical depiction.
*/
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 8dd2479c7937..41f0f68bc911 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1235,6 +1235,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_ubridge) {
+ mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
+ profile->ubridge);
+ }
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
@@ -1551,6 +1556,14 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_config_profile;
+ /* Some resources depend on the unified bridge model, which is configured
+ * as part of config_profile. Query the resources again to get correct
+ * values.
+ */
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+ if (err)
+ goto err_requery_resources;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1568,6 +1581,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
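
The pci.c hunk above queries the resources a second time because enabling ubridge in the config profile changes some of the values the firmware reports; the matching err_requery_resources label allocates nothing and simply falls through. Here is a compilable sketch of that goto-unwind idiom under the same assumption, with stand-in functions that are not driver APIs:

#include <stdio.h>

static int config_profile(void)    { return 0; }
static int requery_resources(void) { return 0; }  /* allocates nothing */
static int aqs_init(void)          { return -1; } /* fail to show the unwind */

static int pci_init_sketch(void)
{
	int err;

	err = config_profile();
	if (err)
		goto err_config_profile;

	/* Resource values may change once the profile (e.g. ubridge) is
	 * applied, so query them again before anything consumes them.
	 */
	err = requery_resources();
	if (err)
		goto err_requery_resources;

	err = aqs_init();
	if (err)
		goto err_aqs_init;

	return 0;

err_aqs_init:
err_requery_resources:	/* nothing to undo: fall through */
err_config_profile:
	return err;
}

int main(void)
{
	printf("pci_init_sketch() = %d\n", pci_init_sketch());
	return 0;
}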
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index c9070e2a9dc4..17ce28e65464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -380,7 +380,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
- const char *mac, u16 fid_vid,
+ const char *mac, u16 fid_vid, u16 vid,
enum mlxsw_reg_sfd_rec_action action,
u16 local_port)
{
@@ -389,6 +389,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
+ mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
@@ -454,6 +456,7 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
@@ -1054,9 +1057,10 @@ enum mlxsw_reg_sfgc_type {
*/
MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
-enum mlxsw_reg_sfgc_bridge_type {
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+/* bridge_type is used in SFGC and SFMR. */
+enum mlxsw_reg_bridge_type {
+ MLXSW_REG_BRIDGE_TYPE_0 = 0, /* Used for .1q FIDs. */
+ MLXSW_REG_BRIDGE_TYPE_1 = 1, /* Used for .1d FIDs. */
};
/* reg_sfgc_bridge_type
@@ -1111,15 +1115,16 @@ MLXSW_ITEM32(reg, sfgc, mid_base, 0x10, 0, 16);
static inline void
mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_reg_bridge_type bridge_type,
enum mlxsw_flood_table_type table_type,
- unsigned int flood_table)
+ unsigned int flood_table, u16 mid_base)
{
MLXSW_REG_ZERO(sfgc, payload);
mlxsw_reg_sfgc_type_set(payload, type);
mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfgc_table_type_set(payload, table_type);
mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+ mlxsw_reg_sfgc_mid_base_set(payload, mid_base);
}
/* SFDF - Switch Filtering DB Flush
@@ -1653,40 +1658,43 @@ MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
static inline void __mlxsw_reg_svfa_pack(char *payload,
enum mlxsw_reg_svfa_mt mt, bool valid,
- u16 fid)
+ u16 fid, bool irif_v, u16 irif)
{
MLXSW_REG_ZERO(svfa, payload);
mlxsw_reg_svfa_swid_set(payload, 0);
mlxsw_reg_svfa_mapping_table_set(payload, mt);
mlxsw_reg_svfa_v_set(payload, valid);
mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_irif_v_set(payload, irif_v);
+ mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
}
static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
- bool valid, u16 fid, u16 vid)
+ bool valid, u16 fid, u16 vid,
+ bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- __mlxsw_reg_svfa_pack(payload, mt, valid, fid);
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_local_port_set(payload, local_port);
mlxsw_reg_svfa_vid_set(payload, vid);
}
static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
- u16 vid)
+ u16 vid, bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- __mlxsw_reg_svfa_pack(payload, mt, valid, fid);
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vid_set(payload, vid);
}
static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
- u32 vni)
+ u32 vni, bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
- __mlxsw_reg_svfa_pack(payload, mt, valid, fid);
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vni_set(payload, vni);
}
@@ -1960,7 +1968,9 @@ MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
- u16 fid_offset)
+ u16 fid_offset, bool flood_rsp,
+ enum mlxsw_reg_bridge_type bridge_type,
+ bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
@@ -1968,6 +1978,10 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
mlxsw_reg_sfmr_vtfp_set(payload, false);
mlxsw_reg_sfmr_vv_set(payload, false);
+ mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
/* SPVMLR - Switch Port VLAN MAC Learning Register
@@ -6937,16 +6951,6 @@ MLXSW_ITEM32(reg, ritr, vlan_if_efid, 0x0C, 0, 16);
*/
MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
-static inline void mlxsw_reg_ritr_fid_set(char *payload,
- enum mlxsw_reg_ritr_if_type rif_type,
- u16 fid)
-{
- if (rif_type == MLXSW_REG_RITR_FID_IF)
- mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
- else
- mlxsw_reg_ritr_vlan_if_vlan_id_set(payload, fid);
-}
-
/* Sub-port Interface */
/* reg_ritr_sp_if_lag
@@ -7112,10 +7116,11 @@ static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
}
static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
- u16 system_port, u16 vid)
+ u16 system_port, u16 efid, u16 vid)
{
mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
}
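
mlxsw_reg_sfmr_pack() now takes the flood/bridge/SMPE state explicitly instead of hard-coding it. The following standalone sketch models the new parameter flow with a plain struct; the struct and enum are illustrative stand-ins, not the register layout, and the example values mirror the FID families defined later in this diff (a .1d FID uses bridge type 1, and only the rFID family sets flood_rsp):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum bridge_type { BRIDGE_TYPE_0, BRIDGE_TYPE_1 }; /* .1q / .1d FIDs */

struct sfmr_sketch {
	uint16_t fid;
	uint16_t fid_offset;
	bool flood_rsp;	/* flooding handled by the router (rFIDs) */
	enum bridge_type bridge_type;
	bool smpe_valid;
	uint16_t smpe;	/* switch multicast port-to-egress-VID index */
};

static void sfmr_pack_sketch(struct sfmr_sketch *pl, uint16_t fid,
			     uint16_t fid_offset, bool flood_rsp,
			     enum bridge_type bt, bool smpe_valid,
			     uint16_t smpe)
{
	pl->fid = fid;
	pl->fid_offset = fid_offset;
	pl->flood_rsp = flood_rsp;
	pl->bridge_type = bt;
	pl->smpe_valid = smpe_valid;
	pl->smpe = smpe_valid ? smpe : 0;
}

int main(void)
{
	struct sfmr_sketch pl;

	/* A .1d FID at offset 1001 whose SMPE index equals its FID index,
	 * as in the Spectrum-2 families later in this diff. */
	sfmr_pack_sketch(&pl, 5096, 1001, false, BRIDGE_TYPE_1, true, 5096);
	printf("fid=%u offset=%u bridge_type=%d smpe=%u\n",
	       pl.fid, pl.fid_offset, (int)pl.bridge_type, pl.smpe);
	return 0;
}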
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index daacf6291253..826e47fb4586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -11,6 +11,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_PGT_SIZE,
MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
@@ -69,6 +70,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_PGT_SIZE] = 0x1004,
[MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
[MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a62887b8d98e..a703ca257198 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3010,6 +3010,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_sp_pgt_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
+ goto err_pgt_init;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
@@ -3201,6 +3207,8 @@ err_traps_init:
err_policers_init:
mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
+ mlxsw_sp_pgt_fini(mlxsw_sp);
+err_pgt_init:
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
return err;
@@ -3232,7 +3240,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
+ mlxsw_sp->pgt_smpe_index_valid = true;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3264,7 +3274,9 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3296,7 +3308,9 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3328,7 +3342,9 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3361,28 +3377,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
}
-/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
- * 802.1Q FIDs
- */
-#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
- VLAN_VID_MASK - 1)
-
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
@@ -3396,17 +3404,14 @@ static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
};
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
.swid_config = {
{
.used_type = 1,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 36c6f5b89c71..50a9380b76e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -112,15 +112,6 @@ enum mlxsw_sp_nve_type {
MLXSW_SP_NVE_TYPE_VXLAN,
};
-struct mlxsw_sp_mid {
- struct list_head list;
- unsigned char addr[ETH_ALEN];
- u16 fid;
- u16 mid;
- bool in_hw;
- unsigned long *ports_in_mid; /* bits array */
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -143,6 +134,7 @@ struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
+struct mlxsw_sp_pgt;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -211,10 +203,13 @@ struct mlxsw_sp {
const struct mlxsw_sp_mall_ops *mall_ops;
const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
+ const struct mlxsw_sp_fid_family **fid_family_arr;
size_t listeners_count;
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
+ struct mlxsw_sp_pgt *pgt;
+ bool pgt_smpe_index_valid;
};
struct mlxsw_sp_ptp_ops {
@@ -390,6 +385,31 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
+struct mlxsw_sp_ports_bitmap {
+ unsigned long *bitmap;
+ unsigned int nbits;
+};
+
+static inline int
+mlxsw_sp_port_bitmap_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ unsigned int nbits = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ ports_bm->nbits = nbits;
+ ports_bm->bitmap = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!ports_bm->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+mlxsw_sp_port_bitmap_fini(struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ bitmap_free(ports_bm->bitmap);
+}
+
static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
bool *trap_en)
{
@@ -716,6 +736,7 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1237,7 +1258,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
@@ -1265,7 +1285,8 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1287,6 +1308,9 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
+extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
@@ -1444,4 +1468,16 @@ int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
+/* spectrum_pgt.c */
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member);
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+
#endif
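
The new mlxsw_sp_ports_bitmap helpers above size a bitmap from mlxsw_core_max_ports() and wrap bitmap_zalloc()/bitmap_free(). A userspace sketch of the same init/use/fini shape, with calloc()/free() standing in for the kernel bitmap API and an assumed port count:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG_SKETCH (8 * sizeof(unsigned long))

struct ports_bitmap {
	unsigned long *bitmap;
	unsigned int nbits;
};

static int ports_bitmap_init(struct ports_bitmap *bm, unsigned int max_ports)
{
	size_t longs = (max_ports + BITS_PER_LONG_SKETCH - 1) /
		       BITS_PER_LONG_SKETCH;

	bm->nbits = max_ports;
	bm->bitmap = calloc(longs, sizeof(unsigned long)); /* kernel: bitmap_zalloc() */
	return bm->bitmap ? 0 : -1;
}

static void ports_bitmap_fini(struct ports_bitmap *bm)
{
	free(bm->bitmap); /* kernel: bitmap_free() */
}

static void ports_bitmap_set(struct ports_bitmap *bm, unsigned int port)
{
	bm->bitmap[port / BITS_PER_LONG_SKETCH] |=
		1UL << (port % BITS_PER_LONG_SKETCH);
}

int main(void)
{
	struct ports_bitmap bm;

	if (ports_bitmap_init(&bm, 256))	/* stand-in for mlxsw_core_max_ports() */
		return 1;
	ports_bitmap_set(&bm, 52);
	printf("port 52 set: %lu\n",
	       (bm.bitmap[52 / BITS_PER_LONG_SKETCH] >>
		(52 % BITS_PER_LONG_SKETCH)) & 1UL);
	ports_bitmap_fini(&bm);
	return 0;
}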
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 10ae1115de6c..24ff305a2995 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -15,7 +15,7 @@ struct mlxsw_sp2_kvdl_part_info {
* usage bits we need and how many indexes there are
* represented by a single bit. This could be got from FW
* querying appropriate resources. So have the resource
- * ids for for this purpose in partition definition.
+ * ids for this purpose in partition definition.
*/
enum mlxsw_res_id usage_bit_count_res_id;
enum mlxsw_res_id index_range_res_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 86b88e686fd3..045a24cacfa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -22,11 +22,18 @@ struct mlxsw_sp_fid_core {
unsigned int *port_fid_mappings;
};
+struct mlxsw_sp_fid_port_vid {
+ struct list_head list;
+ u16 local_port;
+ u16 vid;
+};
+
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
refcount_t ref_count;
u16 fid_index;
+ u16 fid_offset;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -37,6 +44,7 @@ struct mlxsw_sp_fid {
int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
+ struct list_head port_vid_list; /* Ordered by local port. */
};
struct mlxsw_sp_fid_8021q {
@@ -63,7 +71,6 @@ static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
- enum mlxsw_reg_sfgc_bridge_type bridge_type;
enum mlxsw_flood_table_type table_type;
int table_index;
};
@@ -76,18 +83,18 @@ struct mlxsw_sp_fid_ops {
u16 *p_fid_index);
bool (*compare)(const struct mlxsw_sp_fid *fid,
const void *arg);
- u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
int (*port_vid_map)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
- int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni);
+ int (*vni_set)(struct mlxsw_sp_fid *fid);
void (*vni_clear)(struct mlxsw_sp_fid *fid);
- int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid);
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
+ int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif);
};
struct mlxsw_sp_fid_family {
@@ -102,7 +109,10 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
- u8 lag_vid_valid:1;
+ bool flood_rsp;
+ enum mlxsw_reg_bridge_type bridge_type;
+ u16 pgt_base;
+ bool smpe_index_valid;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -137,11 +147,6 @@ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
return fid_family->start_index == fid_index;
}
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_family->lag_vid_valid;
-}
-
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
@@ -206,17 +211,20 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
int err;
- if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid))
+ if (WARN_ON(fid->nve_flood_index_valid))
return -EINVAL;
- err = ops->nve_flood_index_set(fid, nve_flood_index);
- if (err)
- return err;
-
fid->nve_flood_index = nve_flood_index;
fid->nve_flood_index_valid = true;
+ err = ops->nve_flood_index_set(fid);
+ if (err)
+ goto err_nve_flood_index_set;
return 0;
+
+err_nve_flood_index_set:
+ fid->nve_flood_index_valid = false;
+ return err;
}
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
@@ -224,7 +232,7 @@ void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
- if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid))
+ if (WARN_ON(!fid->nve_flood_index_valid))
return;
fid->nve_flood_index_valid = false;
@@ -244,7 +252,7 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
int err;
- if (WARN_ON(!ops->vni_set || fid->vni_valid))
+ if (WARN_ON(fid->vni_valid))
return -EINVAL;
fid->nve_type = type;
@@ -256,15 +264,15 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
if (err)
return err;
- err = ops->vni_set(fid, vni);
+ fid->vni_valid = true;
+ err = ops->vni_set(fid);
if (err)
goto err_vni_set;
- fid->vni_valid = true;
-
return 0;
err_vni_set:
+ fid->vni_valid = false;
rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
mlxsw_sp_fid_vni_ht_params);
return err;
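
The reordering in this hunk sets fid->vni_valid before calling the op, because vni_set() now derives the register contents from FID state rather than from explicit arguments; the error path therefore has to roll the flag back. A minimal sketch of that commit-then-program pattern, with all names as stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct fid_sketch {
	bool vni_valid;
};

/* Stand-in for the hardware op; it reads the FID state. Fails here to
 * exercise the rollback. */
static int hw_vni_op(const struct fid_sketch *fid)
{
	return fid->vni_valid ? -1 : 0;
}

static int fid_vni_set_sketch(struct fid_sketch *fid)
{
	int err;

	fid->vni_valid = true;	/* commit state first: the op reads it */
	err = hw_vni_op(fid);
	if (err)
		goto err_vni_set;

	return 0;

err_vni_set:
	fid->vni_valid = false;	/* roll back on failure */
	return err;
}

int main(void)
{
	struct fid_sketch fid = { .vni_valid = false };
	int err = fid_vni_set_sketch(&fid);

	printf("vni_set=%d vni_valid=%d\n", err, fid.vni_valid);
	return 0;
}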
@@ -276,7 +284,7 @@ void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (WARN_ON(!ops->vni_clear || !fid->vni_valid))
+ if (WARN_ON(!fid->vni_valid))
return;
fid->vni_valid = false;
@@ -316,34 +324,43 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
return NULL;
}
+static u16
+mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
+{
+ return fid_family->end_index - fid_family->start_index + 1;
+}
+
+static u16
+mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 fid_offset)
+{
+ u16 num_fids;
+
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ return fid_family->pgt_base + num_fids * flood_table->table_index +
+ fid_offset;
+}
+
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
- const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr2_pl;
- int err;
+ u16 mid_index;
- if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ if (WARN_ON(!fid_family->flood_tables))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
- sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
- if (!sftr2_pl)
- return -ENOMEM;
-
- mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
- sftr2_pl);
- kfree(sftr2_pl);
- return err;
+ mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
+ fid->fid_offset);
+ return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
+ fid->fid_index, local_port, member);
}
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
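
mlxsw_sp_fid_flood_set() no longer writes SFTR2; it computes a PGT MID as pgt_base + num_fids * table_index + fid_offset and edits that PGT entry instead. A worked, compilable example using the PGT base macros added further down in this diff (VLAN_N_VID is 4096, so the .1q family holds 4094 FIDs; the 1024-FID .1d size follows the "1K 802.1D FIDs" comment below):

#include <stdio.h>

#define VLAN_N_VID		4096
#define FID_8021Q_MAX		(VLAN_N_VID - 2)	/* 4094 */
#define FID_8021Q_PGT_BASE	0
#define FID_8021D_PGT_BASE	(3 * FID_8021Q_MAX)	/* after 3 .1q flood tables */

static unsigned int flood_table_mid(unsigned int pgt_base,
				    unsigned int num_fids,
				    unsigned int table_index,
				    unsigned int fid_offset)
{
	return pgt_base + num_fids * table_index + fid_offset;
}

int main(void)
{
	/* .1q FID at offset 100, broadcast flood table (index 2): */
	printf("8021q BC mid = %u\n",
	       flood_table_mid(FID_8021Q_PGT_BASE, FID_8021Q_MAX, 2, 100));
	/* First .1d FID (offset 0), unicast flood table (index 0): */
	printf("8021d UC mid = %u\n",
	       flood_table_mid(FID_8021D_PGT_BASE, 1024, 0, 0));
	return 0;
}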
@@ -370,11 +387,6 @@ enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
return fid->fid_family->type;
}
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
-{
- fid->rif = rif;
-}
-
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
{
return fid->rif;
@@ -405,6 +417,7 @@ static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
@@ -413,38 +426,341 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u16 fid_offset, bool valid)
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
- mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
- fid_offset);
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
+ fid->fid_offset, fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- __be32 vni, bool vni_valid, u32 nve_flood_index,
- bool nve_flood_index_valid)
+static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ fid->fid_index, fid->fid_offset,
+ fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, fid->nve_flood_index);
+
+ if (rif) {
+ mlxsw_reg_sfmr_irif_v_set(sfmr_pl, true);
+ mlxsw_reg_sfmr_irif_set(sfmr_pl, mlxsw_sp_rif_index(rif));
+ }
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
- 0);
- mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid);
- mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni));
- mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid);
- mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+static int mlxsw_sp_fid_vni_to_fid_map(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif,
+ bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vni_pack(svfa_pl, valid, fid->fid_index,
+ be32_to_cpu(fid->vni), irif_valid, irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_edit_op(fid, rif);
+}
+
+static int mlxsw_sp_fid_vni_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ if (!fid->vni_valid)
+ return 0;
+
+ return mlxsw_sp_fid_vni_to_fid_map(fid, rif, fid->vni_valid);
+}
+
+static int
+mlxsw_sp_fid_vid_to_fid_map(const struct mlxsw_sp_fid *fid, u16 vid, bool valid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vid_pack(svfa_pl, valid, fid->fid_index, vid, irif_valid,
+ irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int
+mlxsw_sp_fid_8021q_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ /* Update the global VID => FID mapping we created when the FID was
+ * configured.
+ */
+ return mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, rif);
+}
+
+static int
+mlxsw_sp_fid_port_vid_to_fid_rif_update_one(const struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_fid_port_vid *pv,
+ bool irif_valid, u16 irif_index)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, pv->local_port, true,
+ fid->fid_index, pv->vid, irif_valid,
+ irif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_vid_to_fid_rif_set(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+ u16 irif_index;
+ int err;
+
+ err = fid->fid_family->ops->vid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ irif_index = mlxsw_sp_rif_index(rif);
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If the port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated with the
+ * ingress RIF.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ err = mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv,
+ true,
+ irif_index);
+ if (err)
+ goto err_port_vid_to_fid_rif_update_one;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_rif_update_one:
+ list_for_each_entry_continue_reverse(pv, &fid->port_vid_list, list) {
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+static void mlxsw_sp_fid_vid_to_fid_rif_unset(const struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If the port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_reiv_handle(struct mlxsw_sp_fid *fid, u16 rif_index,
+ bool valid, u8 port_page)
+{
+ u16 local_port_end = (port_page + 1) * MLXSW_REG_REIV_REC_MAX_COUNT - 1;
+ u16 local_port_start = port_page * MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *port_vid;
+ u8 rec_num, entries_num = 0;
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+
+ list_for_each_entry(port_vid, &fid->port_vid_list, list) {
+ /* port_vid_list is sorted by local_port. */
+ if (port_vid->local_port < local_port_start)
+ continue;
+
+ if (port_vid->local_port > local_port_end)
+ break;
+
+ rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
+ valid ? port_vid->vid : 0);
+ entries_num++;
+ }
+
+ if (!entries_num) {
+ kfree(reiv_pl);
+ return 0;
+ }
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ if (err)
+ goto err_reg_write;
+
+ kfree(reiv_pl);
+ return 0;
+
+err_reg_write:
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
+ u16 rif_index, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u8 num_port_pages;
+ int err, i;
+
+ num_port_pages = mlxsw_core_max_ports(mlxsw_sp->core) /
+ MLXSW_REG_REIV_REC_MAX_COUNT + 1;
+
+ for (i = 0; i < num_port_pages; i++) {
+ err = mlxsw_sp_fid_reiv_handle(fid, rif_index, valid, i);
+ if (err)
+ goto err_reiv_handle;
+ }
+
+ return 0;
+
+err_reiv_handle:
+ for (; i >= 0; i--)
+ mlxsw_sp_fid_reiv_handle(fid, rif_index, !valid, i);
+ return err;
+}
+
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ u16 rif_index = mlxsw_sp_rif_index(rif);
+ int err;
+
+ err = mlxsw_sp_fid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif);
+ if (err)
+ goto err_vni_to_fid_rif_update;
+
+ err = mlxsw_sp_fid_vid_to_fid_rif_set(fid, rif);
+ if (err)
+ goto err_vid_to_fid_rif_set;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);
+ if (err)
+ goto err_erif_eport_to_vid_map;
+
+ fid->rif = rif;
+ return 0;
+
+err_erif_eport_to_vid_map:
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+err_vid_to_fid_rif_set:
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+err_vni_to_fid_rif_update:
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
+{
+ u16 rif_index;
+
+ if (!fid->rif)
+ return;
+
+ rif_index = mlxsw_sp_rif_index(fid->rif);
+ fid->rif = NULL;
+
+ mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, fid->vni_valid);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_edit_op(fid, fid->rif);
+ if (err)
+ goto err_fid_edit_op;
+
+ return 0;
+
+err_fid_edit_op:
+ mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, !fid->vni_valid);
+ return err;
+}
+
+static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
u16 local_port, u16 vid, bool valid)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid = false;
+ u16 irif_index = 0;
+
+ if (fid->rif) {
+ irif_valid = true;
+ irif_index = mlxsw_sp_rif_index(fid->rif);
+ }
- mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid_index,
- vid);
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid->fid_index,
+ vid, irif_valid, irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
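
The REIV helpers above address ports in pages of MLXSW_REG_REIV_REC_MAX_COUNT records each: a local port maps to (page, record) by division and remainder, and mlxsw_sp_fid_erif_eport_to_vid_map() walks max_ports / MLXSW_REG_REIV_REC_MAX_COUNT + 1 pages. A small sketch of that arithmetic; the record count of 32 is an assumption for the demo, since the constant's value is not part of this diff:

#include <stdio.h>

#define REIV_REC_MAX_COUNT 32	/* assumed records per REIV page */

int main(void)
{
	unsigned int max_ports = 130;	/* stand-in for mlxsw_core_max_ports() */
	unsigned int local_port = 70;

	/* The +1 covers the partial last page. */
	printf("pages: %u\n", max_ports / REIV_REC_MAX_COUNT + 1);	/* 5 */
	printf("port %u -> page %u, record %u\n", local_port,
	       local_port / REIV_REC_MAX_COUNT,				/* 2 */
	       local_port % REIV_REC_MAX_COUNT);			/* 6 */
	return 0;
}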
@@ -459,20 +775,19 @@ static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
{
if (fid->vni_valid)
mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
@@ -498,14 +813,8 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
}
-static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_index - VLAN_N_VID;
-}
-
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
int err;
@@ -517,7 +826,7 @@ static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ err = __mlxsw_sp_fid_port_vid_map(fid,
mlxsw_sp_port->local_port,
vid, true);
if (err)
@@ -540,8 +849,7 @@ err_fid_port_vid_map:
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
return err;
@@ -549,7 +857,6 @@ err_fid_port_vid_map:
static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
@@ -562,12 +869,108 @@ static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
}
+static int
+mlxsw_sp_fid_port_vid_list_add(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp_port_vid;
+
+ port_vid = kzalloc(sizeof(*port_vid), GFP_KERNEL);
+ if (!port_vid)
+ return -ENOMEM;
+
+ port_vid->local_port = local_port;
+ port_vid->vid = vid;
+
+ list_for_each_entry(tmp_port_vid, &fid->port_vid_list, list) {
+ if (tmp_port_vid->local_port > local_port)
+ break;
+ }
+
+ list_add_tail(&port_vid->list, &tmp_port_vid->list);
+ return 0;
+}
+
+static void
+mlxsw_sp_fid_port_vid_list_del(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp;
+
+ list_for_each_entry_safe(port_vid, tmp, &fid->port_vid_list, list) {
+ if (port_vid->local_port != local_port || port_vid->vid != vid)
+ continue;
+
+ list_del(&port_vid->list);
+ kfree(port_vid);
+ return;
+ }
+}
+
+static int
+mlxsw_sp_fid_mpe_table_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char smpe_pl[MLXSW_REG_SMPE_LEN];
+
+ mlxsw_reg_smpe_pack(smpe_pl, local_port, fid->fid_index,
+ valid ? vid : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smpe), smpe_pl);
+}
+
+static int
+mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
+ u16 local_port, u16 vid, bool valid)
+{
+ u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
+ u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = mlxsw_sp_rif_index(fid->rif);
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_evid_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, valid);
+ if (err)
+ return err;
+
+ if (!fid->rif)
+ return 0;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ valid);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+
+ return 0;
+
+err_erif_eport_to_vid_map_one:
+ mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, !valid);
+ return err;
+}
+
static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
@@ -576,11 +979,20 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, true);
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
if (err)
return err;
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -591,8 +1003,11 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
return err;
}
@@ -606,43 +1021,29 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
}
-static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni,
- true, fid->nve_flood_index,
- fid->nve_flood_index_valid);
+ return mlxsw_sp_fid_vni_op(fid);
}
static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false,
- fid->nve_flood_index, fid->nve_flood_index_valid);
+ mlxsw_sp_fid_vni_op(fid);
}
-static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index)
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index,
- fid->vni, fid->vni_valid, nve_flood_index,
- true);
+ return mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni,
- fid->vni_valid, 0, false);
+ mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void
@@ -652,13 +1053,19 @@ mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, 0);
}
+static int
+mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021d_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
@@ -666,42 +1073,32 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
};
+#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
+#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
+#define MLXSW_SP_FID_8021Q_PGT_BASE 0
+#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
+
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
{
.packet_type = MLXSW_SP_FLOOD_TYPE_UC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 0,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_MC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 1,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_BC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 2,
},
};
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
- .type = MLXSW_SP_FID_TYPE_8021D,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
- .start_index = VLAN_N_VID,
- .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
- .lag_vid_valid = 1,
-};
-
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
@@ -717,48 +1114,19 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
- .setup = mlxsw_sp_fid_8021q_setup,
- .configure = mlxsw_sp_fid_8021d_configure,
- .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
- .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
- .compare = mlxsw_sp_fid_8021q_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
- .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
- .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
- .vni_set = mlxsw_sp_fid_8021d_vni_set,
- .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
- .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
- .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
- .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
-};
-
-/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
-#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
-#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
- VLAN_VID_MASK - 2)
-
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
- .type = MLXSW_SP_FID_TYPE_8021Q,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
- .start_index = MLXSW_SP_FID_8021Q_EMU_START,
- .end_index = MLXSW_SP_FID_8021Q_EMU_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_emu_ops,
- .lag_vid_valid = 1,
-};
+static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
{
- /* rFIDs are allocated by the device during init */
- return 0;
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
{
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
@@ -787,9 +1155,28 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- /* We only need to transition the port to virtual mode since
- * {Port, VID} => FID is done by the firmware upon RIF creation.
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ return err;
+
+ /* Using the legacy bridge model, we only need to transition the port
+ * to virtual mode, since the {Port, VID} => FID mapping is done by the
+ * firmware upon RIF creation. Using the unified bridge model, we also
+ * need to map {Port, VID} => FID and map the egress VID.
*/
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
+ if (err)
+ goto err_port_vid_map;
+
+ if (fid->rif) {
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port,
+ vid, true);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+ }
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -800,6 +1187,13 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+err_erif_eport_to_vid_map_one:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+err_port_vid_map:
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
return err;
}
@@ -813,39 +1207,69 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+}
+
+static int mlxsw_sp_fid_rfid_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_rfid_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int
+mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .setup = mlxsw_sp_fid_rfid_setup,
.configure = mlxsw_sp_fid_rfid_configure,
.deconfigure = mlxsw_sp_fid_rfid_deconfigure,
.index_alloc = mlxsw_sp_fid_rfid_index_alloc,
.compare = mlxsw_sp_fid_rfid_compare,
.port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
};
-#define MLXSW_SP_RFID_BASE (15 * 1024)
-#define MLXSW_SP_RFID_MAX 1024
-
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
- .type = MLXSW_SP_FID_TYPE_RFID,
- .fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE,
- .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
- .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
- .ops = &mlxsw_sp_fid_rfid_ops,
-};
+static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
-
- return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
{
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
@@ -862,26 +1286,252 @@ static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
return true;
}
+static int mlxsw_sp_fid_dummy_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_dummy_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .setup = mlxsw_sp_fid_dummy_setup,
.configure = mlxsw_sp_fid_dummy_configure,
.deconfigure = mlxsw_sp_fid_dummy_deconfigure,
.index_alloc = mlxsw_sp_fid_dummy_index_alloc,
.compare = mlxsw_sp_fid_dummy_compare,
+ .vni_set = mlxsw_sp_fid_dummy_vni_set,
+ .vni_clear = mlxsw_sp_fid_dummy_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+};
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ int err;
+
+ err = mlxsw_sp_fid_op(fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, fid->rif);
+ if (err)
+ goto err_vid_to_fid_map;
+
+ return 0;
+
+err_vid_to_fid_map:
+ mlxsw_sp_fid_op(fid, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
+
+ mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, false, NULL);
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured. Otherwise, configure a new mapping.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]) {
+ err = __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, true);
+ if (err)
+ return err;
+ }
+
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
+ return 0;
+
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+};
+
+/* There are 4K-2 802.1Q FIDs */
+#define MLXSW_SP_FID_8021Q_START 1 /* FID 0 is reserved. */
+#define MLXSW_SP_FID_8021Q_END (MLXSW_SP_FID_8021Q_START + \
+ MLXSW_SP_FID_8021Q_MAX - 1)
+
+/* There are 1K 802.1D FIDs */
+#define MLXSW_SP_FID_8021D_START (MLXSW_SP_FID_8021Q_END + 1)
+#define MLXSW_SP_FID_8021D_END (MLXSW_SP_FID_8021D_START + \
+ MLXSW_SP_FID_8021D_MAX - 1)
+
+/* There is one dummy FID */
+#define MLXSW_SP_FID_DUMMY (MLXSW_SP_FID_8021D_END + 1)
+
+/* There are 11K rFIDs */
+#define MLXSW_SP_RFID_START (MLXSW_SP_FID_DUMMY + 1)
+#define MLXSW_SP_RFID_END (MLXSW_SP_RFID_START + \
+ MLXSW_SP_FID_RFID_MAX - 1)
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+ .flood_rsp = true,
+ .smpe_index_valid = false,
+};
+
+const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = true,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = VLAN_N_VID - 1,
- .end_index = VLAN_N_VID - 1,
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
.ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
};
-static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
- [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
- [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
@@ -919,6 +1569,8 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fid->port_vid_list);
fid->fid_family = fid_family;
err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
@@ -927,8 +1579,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid->fid_index = fid_index;
__set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
- if (fid->fid_family->ops->setup)
- fid->fid_family->ops->setup(fid, arg);
+ fid->fid_family->ops->setup(fid, arg);
err = fid->fid_family->ops->configure(fid);
if (err)
@@ -967,6 +1618,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
fid->fid_family->ops->deconfigure(fid);
__clear_bit(fid->fid_index - fid_family->start_index,
fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid->port_vid_list));
kfree(fid);
}
@@ -1010,26 +1662,49 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
const struct mlxsw_sp_flood_table *flood_table)
{
enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
const int *sfgc_packet_types;
- int i;
+ u16 num_fids, mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids);
+ if (err)
+ return err;
sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
char sfgc_pl[MLXSW_REG_SFGC_LEN];
- int err;
if (!sfgc_packet_types[i])
continue;
- mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
- flood_table->table_type,
- flood_table->table_index);
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
if (err)
- return err;
+ goto err_reg_write;
}
return 0;
+
+err_reg_write:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 num_fids, mid_base;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
}
static int
@@ -1050,6 +1725,19 @@ mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
return 0;
}
+static void
+mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_tables[i];
+ mlxsw_sp_fid_flood_table_fini(fid_family, flood_table);
+ }
+}
+
static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fid_family *tmpl)
{
@@ -1091,6 +1779,10 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid_family *fid_family)
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+
+ if (fid_family->flood_tables)
+ mlxsw_sp_fid_flood_tables_fini(fid_family);
+
bitmap_free(fid_family->fids_bitmap);
WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
kfree(fid_family);
@@ -1144,7 +1836,7 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
err = mlxsw_sp_fid_family_register(mlxsw_sp,
- mlxsw_sp_fid_family_arr[i]);
+ mlxsw_sp->fid_family_arr[i]);
if (err)
goto err_fid_ops_register;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
new file mode 100644
index 000000000000..7dd3dba0fa83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/refcount.h>
+#include <linux/idr.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_pgt {
+ struct idr pgt_idr;
+ u16 end_index; /* Exclusive. */
+ struct mutex lock; /* Protects PGT. */
+ bool smpe_index_valid;
+};
+
+struct mlxsw_sp_pgt_entry {
+ struct list_head ports_list;
+ u16 index;
+ u16 smpe_index;
+};
+
+struct mlxsw_sp_pgt_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+};
+
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
+{
+ int index, err = 0;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
+ mlxsw_sp->pgt->end_index, GFP_KERNEL);
+
+ if (index < 0) {
+ err = index;
+ goto err_idr_alloc;
+ }
+
+ *p_mid = index;
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
+{
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int
+mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ unsigned int idr_cursor;
+ int i, err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ /* This function is expected to be called several times during driver
+ * init, in a specific order. Verify that mid_base is the first free
+ * index in the IDR, so that the indices can be freed in case of error.
+ */

+ idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
+ if (WARN_ON(idr_cursor != mid_base)) {
+ err = -EINVAL;
+ goto err_idr_cursor;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
+ mid_base, mid_base + count, GFP_KERNEL);
+ if (err < 0)
+ goto err_idr_alloc_cyclic;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc_cyclic:
+ for (i--; i >= 0; i--)
+ idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
+err_idr_cursor:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void
+mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
+ int i;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ for (i = 0; i < count; i++)
+ WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
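The cursor check above leans on idr_alloc_cyclic() advancing the IDR cursor past each index it hands out; consecutive range claims therefore have to be issued back-to-back during init. An illustrative sequence (made-up numbers):

	/* Claims MIDs 0..7; leaves the cursor at 8. */
	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, 0, 8);
	if (err)
		return err;

	/* Must start exactly at 8, or the WARN_ON() in the helper fires. */
	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, 8, 16);
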
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+
+ list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
+ if (pgt_entry_port->local_port == local_port)
+ return pgt_entry_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ void *ret;
+ int err;
+
+ pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
+ if (!pgt_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto err_idr_replace;
+ }
+
+ INIT_LIST_HEAD(&pgt_entry->ports_list);
+ pgt_entry->index = mid;
+ pgt_entry->smpe_index = smpe;
+ return pgt_entry;
+
+err_idr_replace:
+ kfree(pgt_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
+ struct mlxsw_sp_pgt_entry *pgt_entry)
+{
+ WARN_ON(!list_empty(&pgt_entry->ports_list));
+
+ pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
+ if (WARN_ON(IS_ERR(pgt_entry)))
+ return;
+
+ kfree(pgt_entry);
+}
+
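The ranges reserved at init hold NULL pointers; mlxsw_sp_pgt_entry_create() materializes an entry by swapping a real pointer in with idr_replace(), and the destroy path swaps NULL back, so the index stays reserved until mlxsw_sp_pgt_mid_free_range(). The lifecycle in miniature (illustrative only):

	idr_alloc(&idr, NULL, 0, 1, GFP_KERNEL);	/* reserve index 0, no entry */
	idr_replace(&idr, pgt_entry, 0);		/* materialize the entry */
	idr_replace(&idr, NULL, 0);			/* tear down, keep the index */
	idr_remove(&idr, 0);				/* finally release the index */
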
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (pgt_entry)
+ return pgt_entry;
+
+ return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
+}
+
+static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (WARN_ON(!pgt_entry))
+ return;
+
+ if (list_empty(&pgt_entry->ports_list))
+ mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
+}
+
+static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
+ bool member)
+{
+ mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
+}
+
+static int
+mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port, bool member)
+{
+ char *smid2_pl;
+ int err;
+
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
+ mlxsw_sp->pgt->smpe_index_valid,
+ pgt_entry->smpe_index);
+
+ mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+
+ kfree(smid2_pl);
+
+ return err;
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ int err;
+
+ pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
+ if (!pgt_entry_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
+ true);
+ if (err)
+ goto err_pgt_entry_port_write;
+
+ pgt_entry_port->local_port = local_port;
+ list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
+
+ return pgt_entry_port;
+
+err_pgt_entry_port_write:
+ kfree(pgt_entry_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
+
+{
+ list_del(&pgt_entry_port->list);
+ mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
+ pgt_entry_port->local_port, false);
+ kfree(pgt_entry_port);
+}
+
+static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ int err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
+ if (IS_ERR(pgt_entry)) {
+ err = PTR_ERR(pgt_entry);
+ goto err_pgt_entry_get;
+ }
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
+ local_port);
+ if (IS_ERR(pgt_entry_port)) {
+ err = PTR_ERR(pgt_entry_port);
+ goto err_pgt_entry_port_get;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_pgt_entry_port_get:
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+err_pgt_entry_get:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
+ u16 mid, u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
+ if (!pgt_entry)
+ goto out;
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
+ if (!pgt_entry_port)
+ goto out;
+
+ mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+
+out:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member)
+{
+ if (member)
+ return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
+ local_port);
+
+ mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
+ return 0;
+}
+
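Note the asymmetry this wrapper hides: adding a member may allocate memory and write registers and can therefore fail, while removal only unwinds existing state and is void, so the member=false path unconditionally returns 0. A typical caller, mirroring how the MDB code further down drives it:

	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mid, fid, local_port, true);
	if (err)
		return err;
	/* ... */
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mid, fid, local_port, false);
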
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_pgt *pgt;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
+ return -EIO;
+
+ pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
+ if (!pgt)
+ return -ENOMEM;
+
+ idr_init(&pgt->pgt_idr);
+ pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
+ mutex_init(&pgt->lock);
+ pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
+ mlxsw_sp->pgt = pgt;
+ return 0;
+}
+
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->pgt->lock);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
+ idr_destroy(&mlxsw_sp->pgt->pgt_idr);
+ kfree(mlxsw_sp->pgt);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4c7721506603..09009e80cd71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4323,6 +4323,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_nexthop_neigh_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err;
}
@@ -6498,6 +6500,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
struct net_device *dev = rt->fib6_nh->fib_nh_dev;
+ int err;
nh->nhgi = nh_grp->nhgi;
nh->nh_weight = rt->fib6_nh->fib_nh_weight;
@@ -6513,7 +6516,16 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
return 0;
nh->ifindex = dev->ifindex;
- return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_nexthop_type_init;
+
+ return 0;
+
+err_nexthop_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
}
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
@@ -9304,17 +9316,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
- rif_subport->vid);
-
+ efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9339,9 +9352,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
@@ -9353,7 +9372,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9377,10 +9396,9 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
.fid_get = mlxsw_sp_rif_subport_fid_get,
};
-static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
- enum mlxsw_reg_ritr_if_type type,
- u16 vid_fid, bool enable)
+static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -9388,7 +9406,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
- mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+ mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9412,10 +9430,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
return err;
rif->mac_profile_id = mac_profile;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
+ err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
if (err)
- goto err_rif_vlan_fid_op;
+ goto err_rif_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9432,9 +9449,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
@@ -9442,8 +9465,8 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-err_rif_vlan_fid_op:
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+err_rif_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9454,7 +9477,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9462,7 +9485,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
@@ -9539,11 +9562,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr,
+ rif->mac_profile_id, vid, efid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
+ struct netlink_ext_ack *extack)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
+ if (err)
+ goto err_rif_vlan_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_fid_rif_unset(rif->fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .configure = mlxsw_sp1_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
+static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u16 efid = mlxsw_sp_fid_index(rif->fid);
+
+ return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp2_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
@@ -9618,7 +9749,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
@@ -9806,7 +9937,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index b5c83ec7a87f..c5dfb972b433 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -82,7 +82,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
-u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a738d0264e53..4efccd942fb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -48,7 +48,8 @@ struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
- struct list_head mids_list;
+ struct list_head mdb_list;
+ struct rhashtable mdb_ht;
u8 vlan_enabled:1,
multicast_enabled:1,
mrouter:1;
@@ -102,6 +103,33 @@ struct mlxsw_sp_switchdev_ops {
void (*init)(struct mlxsw_sp *mlxsw_sp);
};
+struct mlxsw_sp_mdb_entry_key {
+ unsigned char addr[ETH_ALEN];
+ u16 fid;
+};
+
+struct mlxsw_sp_mdb_entry {
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mdb_entry_key key;
+ u16 mid;
+ struct list_head ports_list;
+ u16 ports_count;
+};
+
+struct mlxsw_sp_mdb_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+ refcount_t refcount;
+ bool mrouter;
+};
+
+static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
+};
+
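Since key_len hashes and compares the whole key struct byte-wise, lookups must build the key in a zero-initialized struct so no stray bytes leak into the comparison. A minimal lookup, matching the pattern used later in mlxsw_sp_port_mdb_del() (mac and fid_index are stand-ins):

	struct mlxsw_sp_mdb_entry_key key = {};
	struct mlxsw_sp_mdb_entry *mdb_entry;

	ether_addr_copy(key.addr, mac);
	key.fid = fid_index;
	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
					   mlxsw_sp_mdb_ht_params);
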
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -109,12 +137,13 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port);
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device
- *bridge_device);
+ *bridge_device, bool mc_enabled);
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -237,6 +266,10 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
if (!bridge_device)
return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_mdb_rhashtable_init;
+
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
@@ -254,7 +287,8 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
- INIT_LIST_HEAD(&bridge_device->mids_list);
+ INIT_LIST_HEAD(&bridge_device->mdb_list);
+
if (list_empty(&bridge->bridges_list))
mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
list_add(&bridge_device->list, &bridge->bridges_list);
@@ -273,6 +307,8 @@ err_vxlan_init:
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
+ rhashtable_destroy(&bridge_device->mdb_ht);
+err_mdb_rhashtable_init:
kfree(bridge_device);
return ERR_PTR(err);
}
@@ -290,7 +326,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
- WARN_ON(!list_empty(&bridge_device->mids_list));
+ WARN_ON(!list_empty(&bridge_device->mdb_list));
+ rhashtable_destroy(&bridge_device->mdb_ht);
kfree(bridge_device);
}
@@ -643,6 +680,64 @@ err_port_bridge_vlan_flood_set:
}
static int
+mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type, local_port, member);
+ if (err)
+ goto err_fid_flood_set;
+ }
+
+ return 0;
+
+err_fid_flood_set:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
+ local_port, !member);
+ }
+
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ member);
+ if (err)
+ goto err_bridge_vlans_flood_set;
+ }
+
+ return 0;
+
+err_bridge_vlans_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ !member);
+ return err;
+}
+
+static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
bool set)
@@ -813,6 +908,9 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port)
return 0;
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
+
if (!bridge_port->bridge_device->multicast_enabled)
goto out;
@@ -822,8 +920,6 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
- is_port_mrouter);
out:
bridge_port->mrouter = is_port_mrouter;
return 0;
@@ -842,6 +938,7 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *orig_dev,
bool mc_disabled)
{
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -854,43 +951,184 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_device)
return 0;
- if (bridge_device->multicast_enabled != !mc_disabled) {
- bridge_device->multicast_enabled = !mc_disabled;
- mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
- bridge_device);
- }
+ if (bridge_device->multicast_enabled == !mc_disabled)
+ return 0;
+
+ bridge_device->multicast_enabled = !mc_disabled;
+ err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ !mc_disabled);
+ if (err)
+ goto err_mc_enable_sync;
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
- enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
bool member = mlxsw_sp_mc_flood(bridge_port);
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
- bridge_port,
- packet_type, member);
+ err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
+ packet_type,
+ member);
if (err)
- return err;
+ goto err_flood_table_set;
}
- bridge_device->multicast_enabled = !mc_disabled;
-
return 0;
+
+err_flood_table_set:
+ list_for_each_entry_continue_reverse(bridge_port,
+ &bridge_device->ports_list, list) {
+ bool member = mlxsw_sp_mc_flood(bridge_port);
+
+ mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
+ !member);
+ }
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ mc_disabled);
+err_mc_enable_sync:
+ bridge_device->multicast_enabled = mc_disabled;
+ return err;
+}
+
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
+ if (mdb_entry_port->local_port == local_port)
+ return mdb_entry_port;
+ }
+
+ return NULL;
}
-static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
- u16 mid_idx, bool add)
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
{
- char *smid2_pl;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
int err;
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count++;
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add, false, 0);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+ mdb_entry->ports_count++;
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port, bool force)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count--;
+ return;
+ }
+
+ mdb_entry->ports_count--;
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+}
+
+static __always_unused struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ int err;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (!mdb_entry_port->mrouter)
+ refcount_inc(&mdb_entry_port->refcount);
+ mdb_entry_port->mrouter = true;
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ mdb_entry_port->mrouter = true;
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static __always_unused void
+mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!mdb_entry_port->mrouter)
+ return;
+
+ mdb_entry_port->mrouter = false;
+ if (!refcount_dec_and_test(&mdb_entry_port->refcount))
+ return;
+
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
}
static void
@@ -898,10 +1136,17 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device,
bool add)
{
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- list_for_each_entry(mid, &bridge_device->mids_list, list)
- mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
+ }
}
static int
@@ -1127,14 +1372,13 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
- bool last_port, last_vlan;
+ bool last_port;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
- last_vlan = list_is_singular(&bridge_port->vlans_list);
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
last_port = list_is_singular(&bridge_vlan->port_vlan_list);
@@ -1146,8 +1390,9 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port,
mlxsw_sp_fid_index(fid));
- if (last_vlan)
- mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
+
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
+ mlxsw_sp_fid_index(fid));
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
@@ -1436,7 +1681,8 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
}
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
+ const char *mac, u16 fid, u16 vid,
+ bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
{
@@ -1449,7 +1695,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
+ local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1464,18 +1711,18 @@ out:
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+ const char *mac, u16 fid, u16 vid,
+ bool adding, bool dynamic)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
+ adding, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
@@ -1537,7 +1784,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
bridge_port->system_port,
- fdb_info->addr, fid_index,
+ fdb_info->addr, fid_index, vid,
adding, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
@@ -1546,8 +1793,9 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
vid, adding, false);
}
-static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
- u16 fid, u16 mid_idx, bool adding)
+static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mdb_entry *mdb_entry,
+ bool adding)
{
char *sfd_pl;
u8 num_rec;
@@ -1558,8 +1806,9 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
+ mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mdb_entry->mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1573,80 +1822,17 @@ out:
return err;
}
-static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
- long *ports_bitmap,
- bool set_router_port)
-{
- char *smid2_pl;
- int err, i;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false, false, 0);
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
- if (mlxsw_sp->ports[i])
- mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
- }
-
- mlxsw_reg_smid2_port_mask_set(smid2_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
-
- for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
-
- mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 mid_idx, bool add)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid2_pl;
- int err;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add,
- false, 0);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
-{
- struct mlxsw_sp_mid *mid;
-
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
- return mid;
- }
- return NULL;
-}
-
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
- unsigned long *ports_bitmap)
+ struct mlxsw_sp_ports_bitmap *ports_bm)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u64 max_lag_members, i;
int lag_id;
if (!bridge_port->lagged) {
- set_bit(bridge_port->system_port, ports_bitmap);
+ set_bit(bridge_port->system_port, ports_bm->bitmap);
} else {
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_LAG_MEMBERS);
@@ -1656,13 +1842,13 @@ mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
lag_id, i);
if (mlxsw_sp_port)
set_bit(mlxsw_sp_port->local_port,
- ports_bitmap);
+ ports_bm->bitmap);
}
}
}
static void
-mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
+mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp *mlxsw_sp)
{
@@ -1672,116 +1858,226 @@ mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
if (bridge_port->mrouter) {
mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
bridge_port,
- flood_bitmap);
+ flood_bm);
}
}
}
-static bool
-mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid,
- struct mlxsw_sp_bridge_device *bridge_device)
+static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ unsigned int nbits = ports_bm->nbits;
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, nbits) {
+ mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
+ mdb_entry,
+ i);
+ if (IS_ERR(mdb_entry_port)) {
+ nbits = i;
+ goto err_mrouter_port_get;
+ }
+ }
+
+ return 0;
+
+err_mrouter_port_get:
+ for_each_set_bit(i, ports_bm->bitmap, nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+ return PTR_ERR(mdb_entry_port);
+}
+
+static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+}
+
+static int
+mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
{
- long *flood_bitmap;
- int num_of_ports;
- u16 mid_idx;
+ struct mlxsw_sp_ports_bitmap ports_bm;
int err;
- mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
- MLXSW_SP_MID_MAX);
- if (mid_idx == MLXSW_SP_MID_MAX)
- return false;
+ err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
+ if (err)
+ return err;
- num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
- if (!flood_bitmap)
- return false;
+ mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
+
+ if (add)
+ err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
+ mdb_entry);
+ else
+ mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
+
+ mlxsw_sp_port_bitmap_fini(&ports_bm);
+ return err;
+}
- bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
- mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
- mid->mid = mid_idx;
- err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
- bridge_device->mrouter);
- bitmap_free(flood_bitmap);
+ ether_addr_copy(mdb_entry->key.addr, addr);
+ mdb_entry->key.fid = fid;
+ err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
if (err)
- return false;
+ goto err_pgt_mid_alloc;
+
+ INIT_LIST_HEAD(&mdb_entry->ports_list);
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
- true);
+ err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
+ true);
if (err)
- return false;
+ goto err_mdb_mrouters_set;
- set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = true;
- return true;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port)) {
+ err = PTR_ERR(mdb_entry_port);
+ goto err_mdb_entry_port_get;
+ }
+
+ if (bridge_device->multicast_enabled) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
+ if (err)
+ goto err_mdb_entry_write;
+ }
+
+ err = rhashtable_insert_fast(&bridge_device->mdb_ht,
+ &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);
+
+ return mdb_entry;
+
+err_rhashtable_insert:
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+err_mdb_entry_write:
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
+err_mdb_entry_port_get:
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+err_mdb_mrouters_set:
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+err_pgt_mid_alloc:
+ kfree(mdb_entry);
+ return ERR_PTR(err);
}
-static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ u16 local_port, bool force)
{
- if (!mid->in_hw)
- return 0;
-
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = false;
- return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
- false);
+ list_del(&mdb_entry->list);
+ rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+ WARN_ON(!list_empty(&mdb_entry->ports_list));
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+ kfree(mdb_entry);
}
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
{
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- mid = kzalloc(sizeof(*mid), GFP_KERNEL);
- if (!mid)
- return NULL;
+ ether_addr_copy(key.addr, addr);
+ key.fid = fid;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (mdb_entry) {
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
- GFP_KERNEL);
- if (!mid->ports_in_mid)
- goto err_ports_in_mid_alloc;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
+ mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port))
+ return ERR_CAST(mdb_entry_port);
- ether_addr_copy(mid->addr, addr);
- mid->fid = fid;
- mid->in_hw = false;
+ return mdb_entry;
+ }
- if (!bridge_device->multicast_enabled)
- goto out;
+ return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
+ local_port);
+}
- if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
- goto err_write_mdb_entry;
+static bool
+mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_mdb_entry_port *removed_entry_port,
+ bool force)
+{
+ if (mdb_entry->ports_count > 1)
+ return false;
-out:
- list_add_tail(&mid->list, &bridge_device->mids_list);
- return mid;
+ if (force)
+ return true;
-err_write_mdb_entry:
- bitmap_free(mid->ports_in_mid);
-err_ports_in_mid_alloc:
- kfree(mid);
- return NULL;
+ if (!removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 1)
+ return false;
+
+ if (removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 2)
+ return false;
+
+ return true;
}
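Reading the thresholds above for the single remaining port entry (ports_count == 1): a plain member carries one reference per join, and an mrouter port carries one extra reference on top, so:

	remaining port			refcount	entry removed?
	member, one join		1		yes
	member, several joins		> 1		no
	mrouter only			1		yes
	mrouter + one join		2		yes
	mrouter + several joins		> 2		no

An mrouter reference alone does not keep the entry alive; once the last real join goes away, the entry is torn down and the mrouter references are dropped with it.
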
-static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
+ bool force)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err = 0;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
- if (bitmap_empty(mid->ports_in_mid,
- mlxsw_core_max_ports(mlxsw_sp->core))) {
- err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
- list_del(&mid->list);
- bitmap_free(mid->ports_in_mid);
- kfree(mid);
- }
- return err;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ /* Avoid a transient state in which the MDB entry points to an empty
+ * PGT entry, as packets would then be dropped instead of flooded.
+ * Call mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry
+ * and only then releases the PGT entry.
+ */
+ if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
+ mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
+ local_port, force);
+ else
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
+ force);
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1790,12 +2086,10 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
- int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
@@ -1810,54 +2104,35 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
- fid_index);
- if (!mid) {
- netdev_err(dev, "Unable to allocate MC group\n");
- return -ENOMEM;
- }
- }
- set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
-
- if (!bridge_device->multicast_enabled)
- return 0;
-
- if (bridge_port->mrouter)
- return 0;
-
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
- if (err) {
- netdev_err(dev, "Unable to set SMID\n");
- goto err_out;
- }
+ mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
+ mdb->addr, fid_index,
+ mlxsw_sp_port->local_port);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
return 0;
-
-err_out:
- mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- return err;
}
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_device
- *bridge_device)
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool mc_enabled)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_mid *mid;
- bool mc_enabled;
-
- mc_enabled = bridge_device->multicast_enabled;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (mc_enabled)
- mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
- bridge_device);
- else
- mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
+ if (err)
+ goto err_mdb_entry_write;
}
+ return 0;
+
+err_mdb_entry_write:
+ list_for_each_entry_continue_reverse(mdb_entry,
+ &bridge_device->mdb_list, list)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
+ return err;
}
static void
@@ -1865,14 +2140,20 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
bool add)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
}
}
@@ -1950,28 +2231,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int
-__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_mid *mid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (bridge_port->bridge_device->multicast_enabled &&
- !bridge_port->mrouter) {
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
- }
-
- err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- if (err)
- netdev_err(dev, "Unable to remove MC SFD\n");
-
- return err;
-}
-
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1981,7 +2240,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
@@ -1997,32 +2257,44 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
+ ether_addr_copy(key.addr, mdb->addr);
+ key.fid = fid_index;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (!mdb_entry) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
}
- return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ mlxsw_sp_port->local_port, false);
+ return 0;
}
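
The lookup above keys the hash table on a composite {MAC address, FID} struct that is zero-initialized before the fields are filled in, so any padding compares consistently. A hedged standalone sketch of the same composite-key lookup, where a linear scan stands in for the rhashtable and all data is made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mdb_key {
        uint8_t addr[6];
        uint16_t fid;
};

struct mdb_entry {
        struct mdb_key key;
        const char *name;
};

static struct mdb_entry table[] = {
        { { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, 10 }, "entry-a" },
        { { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 }, 20 }, "entry-b" },
};

static struct mdb_entry *lookup(const uint8_t *addr, uint16_t fid)
{
        struct mdb_key key = {}; /* zero everything, padding included */
        size_t i;

        memcpy(key.addr, addr, sizeof(key.addr));
        key.fid = fid;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (!memcmp(&table[i].key, &key, sizeof(key)))
                        return &table[i];
        return NULL;
}

int main(void)
{
        uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 };
        struct mdb_entry *e = lookup(mac, 20);

        printf("%s\n", e ? e->name : "miss");
        return 0;
}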
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid, *tmp;
+ struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
+ u16 local_port = mlxsw_sp_port->local_port;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
- if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
- __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
- mid);
- } else if (bridge_device->multicast_enabled &&
- bridge_port->mrouter) {
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- }
+ list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
+ list) {
+ if (mdb_entry->key.fid != fid_index)
+ continue;
+
+ if (bridge_port->mrouter)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
+ mdb_entry,
+ local_port);
+
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ local_port, true);
}
}
@@ -2634,10 +2906,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port, vid, fid, evid = 0;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u16 local_port;
- u16 vid, fid;
bool do_notification = true;
int err;
@@ -2668,9 +2939,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ evid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
- err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
+ err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
@@ -2730,8 +3002,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
- mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 79ecf296161e..a9a1dea6d731 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1212,8 +1212,8 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
/* SGMII/1000/2500BASE-X PCS power down */
mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
- if (ret < 0)
- return ret;
+ if (mii_ctl < 0)
+ return mii_ctl;
mii_ctl |= BMCR_PDOWN;
ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 5784c4161e5e..1d6e3b641b2e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -994,7 +994,7 @@ static int lan966x_probe(struct platform_device *pdev)
struct fwnode_handle *ports, *portnp;
struct lan966x *lan966x;
u8 mac_addr[ETH_ALEN];
- int err, i;
+ int err;
lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
if (!lan966x)
@@ -1025,11 +1025,7 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
return dev_err_probe(&pdev->dev, err, "Reset failed");
- i = 0;
- fwnode_for_each_available_child_node(ports, portnp)
- ++i;
-
- lan966x->num_phys_ports = i;
+ lan966x->num_phys_ports = NUM_PHYS_PORTS;
lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
sizeof(struct lan966x_port *),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 3b86ddddc756..2787055c1847 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -34,6 +34,7 @@
/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
#define QSYS_Q_RSRV 95
+#define NUM_PHYS_PORTS 8
#define CPU_PORT 8
/* Reserved PGIDs */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 40ef9fad3a77..ec07f7d0528c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -397,6 +397,9 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
bool is_host;
int res, err;
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
is_host = netif_is_bridge_master(v->obj.orig_dev);
/* When VLAN unaware the vlan value is not parsed and we receive vid 0.
@@ -480,6 +483,9 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
u32 mact_entry, res, pgid_entry[3], misc_cfg;
bool host_ena;
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
if (!br_vlan_enabled(spx5->hw_bridge_dev))
vid = 1;
else
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 61497c3e4cfb..971dde8c3286 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2692,7 +2692,7 @@ again:
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header can be at most 1KB long */
- cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cum_len = -skb_tcp_all_headers(skb);
/* for IPv6 TSO, the checksum offset stores the
* TCP header length, to save the firmware from
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 50bca486a244..9aae7f1eb5d2 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -158,7 +158,7 @@ MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
I. Board Compatibility
This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
-It also works with other chips in in the DP83810 series.
+It also works with other chips in the DP83810 series.
II. Board-specific settings
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 0c0d127906dd..09a89e72f904 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -32,28 +32,4 @@ config S2IO
To compile this driver as a module, choose M here. The module
will be called s2io.
-config VXGE
- tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
- depends on PCI
- help
- This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter. These were originally released from
- Neterion, which was later acquired by Exar. So, the adapters might be
- labeled as either one, depending on their age.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/neterion/vxge.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called vxge.
-
-config VXGE_DEBUG_TRACE_ALL
- bool "Enabling All Debug trace statements in driver"
- default n
- depends on VXGE
- help
- Say Y here if you want to enable all the debug trace statements in
- the vxge driver. By default only a few debug trace statements are
- enabled.
-
endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
index 87ede8a47bb8..de98b4e6eff9 100644
--- a/drivers/net/ethernet/neterion/Makefile
+++ b/drivers/net/ethernet/neterion/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_S2IO) += s2io.o
-obj-$(CONFIG_VXGE) += vxge/
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6dd451adc331..30f955efa830 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2156,7 +2156,7 @@ static int verify_xena_quiescence(struct s2io_nic *sp)
/*
* In PCI 33 mode, the P_PLL is not used, and therefore,
- * the the P_PLL_LOCK bit in the adapter_status register will
+ * the P_PLL_LOCK bit in the adapter_status register will
* not be asserted.
*/
if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
@@ -3817,7 +3817,7 @@ static irqreturn_t s2io_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
struct pci_dev *pdev = sp->pdev;
@@ -5492,7 +5492,7 @@ s2io_ethtool_gringparam(struct net_device *dev,
}
/**
- * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
+ * s2io_ethtool_getpause_data -Pause frame generation and reception.
* @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
@@ -7449,7 +7449,7 @@ aggregate:
* @link : indicates whether link is UP/DOWN.
* Description:
* This function stops/starts the Tx queue depending on whether the link
- * status of the NIC is is down or up. This is called by the Alarm
+ * status of the NIC is down or up. This is called by the Alarm
* interrupt handler whenever a link change interrupt comes up.
* Return value:
* void.
@@ -7732,7 +7732,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
* Setting the device configuration parameters.
* Most of these parameters can be specified by the user during
* module insertion as they are module loadable parameters. If
- * these parameters are not not specified during load time, they
+ * these parameters are not specified during load time, they
* are initialized with default values.
*/
config = &sp->config;
diff --git a/drivers/net/ethernet/neterion/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile
deleted file mode 100644
index 0820e81ca7fb..000000000000
--- a/drivers/net/ethernet/neterion/vxge/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
-# Virtualized Server Adapter linux driver
-
-obj-$(CONFIG_VXGE) += vxge.o
-
-vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
deleted file mode 100644
index a3204a7ef750..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ /dev/null
@@ -1,5099 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/vmalloc.h>
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
- status = __vxge_hw_vpath_stats_access(vpath, \
- VXGE_HW_STATS_OP_READ, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
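
Note that the removed VXGE_HW_VPATH_STATS_PIO_READ() macro expands to a block containing a bare return, so every call site can silently exit the calling function, and it also reaches into the caller's locals. A small self-contained C sketch of that idiom, wrapped in the conventional do { } while (0) form; read_stat() and the values are invented:

#include <stdio.h>

static int read_stat(unsigned int offset, unsigned long *val)
{
        if (offset > 16)
                return -1; /* simulate a failed PIO read */
        *val = offset * 100;
        return 0;
}

#define STATS_PIO_READ(offset) do {               \
        if (read_stat((offset), &val))            \
                return -1; /* exits the caller */ \
} while (0)

static int collect(void)
{
        unsigned long val;

        STATS_PIO_READ(4);
        printf("stat@4 = %lu\n", val);
        STATS_PIO_READ(42); /* fails: collect() returns here */
        printf("never reached\n");
        return 0;
}

int main(void)
{
        return collect() ? 1 : 0;
}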
-static void
-vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
-{
- u64 val64;
-
- val64 = readq(&vp_reg->rxmac_vcfg0);
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- writeq(val64, &vp_reg->rxmac_vcfg0);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-}
-
-/*
- * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
- */
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_virtualpath *vpath;
- u64 val64, rxd_count, rxd_spat;
- int count = 0, total_count = 0;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
-
- vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
-
- /* Check that the ring controller for this vpath has enough free RxDs
- * to send frames to the host. This is done by reading the
- * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
- * RXD_SPAT value for the vpath.
- */
- val64 = readq(&vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
- /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
- * leg room.
- */
- rxd_spat *= 2;
-
- do {
- mdelay(1);
-
- rxd_count = readq(&vp_reg->prc_rxd_doorbell);
-
- /* Check that the ring controller for this vpath does
- * not have any frame in its pipeline.
- */
- val64 = readq(&vp_reg->frm_in_progress_cnt);
- if ((rxd_count <= rxd_spat) || (val64 > 0))
- count = 0;
- else
- count++;
- total_count++;
- } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
- (total_count < VXGE_HW_MAX_POLLING_COUNT));
-
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
- __func__);
-
- return total_count;
-}
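
The wait loop above only declares the ring idle after VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT consecutive quiet samples, bounded by a maximum total poll count. The same structure as a runnable sketch, with sample_busy() standing in for the doorbell and frame-in-progress reads:

#include <stdio.h>

#define MIN_SUCCESSIVE_IDLE 10
#define MAX_POLLS           200

static int sample_busy(int t)
{
        return t < 25; /* pretend traffic drains after 25 polls */
}

static int wait_receive_idle(void)
{
        int count = 0, total = 0;

        do {
                /* mdelay(1) sits here in the real loop */
                if (sample_busy(total))
                        count = 0; /* any activity resets the streak */
                else
                        count++;
                total++;
        } while (count < MIN_SUCCESSIVE_IDLE && total < MAX_POLLS);

        if (total >= MAX_POLLS)
                printf("still receiving traffic, aborting wait\n");
        return total;
}

int main(void)
{
        printf("idle after %d polls\n", wait_receive_idle());
        return 0;
}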
-
-/* vxge_hw_device_wait_receive_idle - This function waits until all frames
- * stored in the frame buffer for each vpath assigned to the given
- * function (hldev) have been sent to the host.
- */
-void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
-{
- int i, total_count = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- break;
- }
-}
-
-/*
- * __vxge_hw_device_register_poll
- * Will poll certain register for specified amount of time.
- * Will poll until masked bit is not cleared.
- */
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
-{
- u64 val64;
- u32 i = 0;
-
- udelay(10);
-
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- udelay(100);
- } while (++i <= 9);
-
- i = 0;
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- mdelay(1);
- } while (++i <= max_millis);
-
- return VXGE_HW_FAIL;
-}
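
The poll above runs in two phases: a short burst of fine-grained ~100 us waits for bits that usually clear quickly, then 1 ms steps up to the caller's budget. A hedged userspace approximation, where nanosleep() stands in for udelay()/mdelay() and reg_clear() is an invented register read:

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static void wait_us(long us)
{
        struct timespec ts = { .tv_sec = 0, .tv_nsec = us * 1000L };

        nanosleep(&ts, NULL);
}

static int reg_clear(int t)
{
        return t >= 5; /* pretend the bit clears on the 5th read */
}

static int register_poll(unsigned int max_millis)
{
        unsigned int i;
        int t = 0;

        for (i = 0; i < 10; i++) {         /* phase 1: ~100 us steps */
                if (reg_clear(t++))
                        return 0;
                wait_us(100);
        }

        for (i = 0; i < max_millis; i++) { /* phase 2: 1 ms steps */
                if (reg_clear(t++))
                        return 0;
                wait_us(1000);
        }

        return -1; /* timed out */
}

int main(void)
{
        printf("%s\n", register_poll(30) ? "timeout" : "cleared");
        return 0;
}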
-
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- return __vxge_hw_device_register_poll(addr, mask, max_millis);
-}
-
-static enum vxge_hw_status
-vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
- u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
- u64 *steer_ctrl)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- enum vxge_hw_status status;
- u64 val64;
- u32 retry = 0, max_retry = 3;
-
- spin_lock(&vpath->lock);
- if (!vpath->vp_open) {
- spin_unlock(&vpath->lock);
- max_retry = 100;
- }
-
- writeq(*data0, &vp_reg->rts_access_steer_data0);
- writeq(*data1, &vp_reg->rts_access_steer_data1);
- wmb();
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- *steer_ctrl;
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- /* The __vxge_hw_device_register_poll can udelay for a significant
- * amount of time, blocking other processes from the CPU. If it delays
- * for ~5secs, an NMI error can occur. A way around this is to give up
- * the processor via msleep, but this is not allowed while under lock.
- * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
- * 1sec and sleep for 10ms until the firmware operation has completed
- * or timed-out.
- */
- while ((status != VXGE_HW_OK) && retry++ < max_retry) {
- if (!vpath->vp_open)
- msleep(20);
- status = __vxge_hw_device_register_poll(
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- }
-
- if (status != VXGE_HW_OK)
- goto out;
-
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- *data0 = readq(&vp_reg->rts_access_steer_data0);
- *data1 = readq(&vp_reg->rts_access_steer_data1);
- *steer_ctrl = val64;
- } else
- status = VXGE_HW_FAIL;
-
-out:
- if (vpath->vp_open)
- spin_unlock(&vpath->lock);
- return status;
-}
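
The comment above captures a classic kernel constraint: a waiter may msleep() only when it is not holding the spinlock; otherwise it must make do with short busy-waits and fewer retries. A rough standalone model of that decision (everything here is invented; the real code keys off vpath->vp_open):

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static void sleep_ms(long ms)
{
        struct timespec ts = { .tv_sec = ms / 1000,
                               .tv_nsec = (ms % 1000) * 1000000L };

        nanosleep(&ts, NULL);
}

static int fw_op_done(int attempt)
{
        return attempt >= 3; /* pretend the op completes on retry 3 */
}

static int wait_for_fw(bool holding_lock)
{
        int retry = 0;
        int max_retry = holding_lock ? 3 : 100;

        while (!fw_op_done(retry) && retry++ < max_retry) {
                if (!holding_lock)
                        sleep_ms(10); /* may sleep: lock not held */
                /* else: only short non-sleeping delays are safe;
                 * sleeping under a spinlock can deadlock the system */
        }
        return fw_op_done(retry) ? 0 : -1;
}

int main(void)
{
        printf("%d\n", wait_for_fw(false));
        return 0;
}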
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_READ,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- return status;
-}
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- u32 ret;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
- goto exit;
- }
-
- ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
- if (ret != 1) {
- vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
- __func__, ret);
- status = VXGE_HW_FAIL;
- }
-
-exit:
- return status;
-}
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int ret_code, sec_code;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- /* send upgrade start command */
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_START,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
- __func__);
- return status;
- }
-
- /* Transfer fw image to adapter 16 bytes at a time */
- for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
- steer_ctrl = 0;
-
- /* The next 128 bits of fwdata to be loaded onto the adapter */
- data0 = *((u64 *)fwdata);
- data1 = *((u64 *)fwdata + 1);
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_SEND,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
- __func__);
- goto out;
- }
-
- ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
- switch (ret_code) {
- case VXGE_HW_FW_UPGRADE_OK:
- /* All OK, send next 16 bytes. */
- break;
- case VXGE_FW_UPGRADE_BYTES2SKIP:
- /* skip bytes in the stream */
- fwdata += (data0 >> 8) & 0xFFFFFFFF;
- break;
- case VXGE_HW_FW_UPGRADE_DONE:
- goto out;
- case VXGE_HW_FW_UPGRADE_ERR:
- sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
- switch (sec_code) {
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
- printk(KERN_ERR
- "corrupted data from .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
- printk(KERN_ERR "invalid .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
- printk(KERN_ERR "buffer overflow\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
- printk(KERN_ERR "failed to flash the image\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
- printk(KERN_ERR
- "generic error. Unknown error type\n");
- break;
- default:
- printk(KERN_ERR "Unknown error of type %d\n",
- sec_code);
- break;
- }
- status = VXGE_HW_FAIL;
- goto out;
- default:
- printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
- status = VXGE_HW_FAIL;
- goto out;
- }
- /* point to next 16 bytes */
- fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
- }
-out:
- return status;
-}
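
The transfer loop above streams the image 16 bytes per step as two 64-bit words and lets the device answer "ok", "skip n bytes", or "done". A compressed sketch of that handshake; send_chunk() and the reply codes are invented, and memcpy() is used instead of the pointer cast to sidestep alignment concerns:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLK_SIZE 16
enum reply { REPLY_OK, REPLY_SKIP, REPLY_DONE };

static enum reply send_chunk(uint64_t d0, uint64_t d1, long *skip)
{
        (void)d0; (void)d1;
        *skip = 0;
        return REPLY_OK; /* a real device may answer SKIP or DONE */
}

static int flash_image(const uint8_t *fw, long size)
{
        while (size > 0) {
                uint64_t d0, d1;
                long skip;

                memcpy(&d0, fw, 8);
                memcpy(&d1, fw + 8, 8);

                switch (send_chunk(d0, d1, &skip)) {
                case REPLY_OK:
                        break;
                case REPLY_SKIP:
                        fw += skip; /* device asks to jump ahead */
                        break;
                case REPLY_DONE:
                        return 0;
                }
                fw += BLK_SIZE;
                size -= BLK_SIZE;
        }
        return 0;
}

int main(void)
{
        uint8_t image[64] = { 0 };

        return flash_image(image, sizeof(image));
}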
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *img)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int i;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_EPROM_REV,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- break;
-
- img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
- img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
- img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
- img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_channel_free - Free memory allocated for channel
- * This function deallocates memory from the channel and various arrays
- * in the channel
- */
-static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
-{
- kfree(channel->work_arr);
- kfree(channel->free_arr);
- kfree(channel->reserve_arr);
- kfree(channel->orig_arr);
- kfree(channel);
-}
-
-/*
- * __vxge_hw_channel_initialize - Initialize a channel
- * This function initializes a channel by properly setting the
- * various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
-{
- u32 i;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = channel->vph->vpath;
-
- if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
- for (i = 0; i < channel->length; i++)
- channel->orig_arr[i] = channel->reserve_arr[i];
- }
-
- switch (channel->type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- vpath->fifoh = (struct __vxge_hw_fifo *)channel;
- channel->stats = &((struct __vxge_hw_fifo *)
- channel)->stats->common_stats;
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- vpath->ringh = (struct __vxge_hw_ring *)channel;
- channel->stats = &((struct __vxge_hw_ring *)
- channel)->stats->common_stats;
- break;
- default:
- break;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_channel_reset - Resets a channel
- * This function resets a channel by properly setting the various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
-{
- u32 i;
-
- for (i = 0; i < channel->length; i++) {
- if (channel->reserve_arr != NULL)
- channel->reserve_arr[i] = channel->orig_arr[i];
- if (channel->free_arr != NULL)
- channel->free_arr[i] = NULL;
- if (channel->work_arr != NULL)
- channel->work_arr[i] = NULL;
- }
- channel->free_ptr = channel->length;
- channel->reserve_ptr = channel->length;
- channel->reserve_top = 0;
- channel->post_index = 0;
- channel->compl_index = 0;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_pci_e_init
- * Initialize certain PCI/PCI-X configuration registers
- * with recommended values. Save config space for future hw resets.
- */
-static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
-{
- u16 cmd = 0;
-
- /* Set the PErr Response bit and SERR in the PCI command register. */
- pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
- cmd |= 0x140;
- pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
-
- pci_save_state(hldev->pdev);
-}
-
-/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset is
- * in progress
- * This routine checks that the vpath reset in progress register reads zero
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
-{
- enum vxge_hw_status status;
- status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- return status;
-}
-
-/*
- * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
- * Set the swapper bits appropriately for the legacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- wmb();
-
- switch (val64) {
- case VXGE_HW_SWAPPER_INITIAL_VALUE:
- return status;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- break;
-
- case VXGE_HW_SWAPPER_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
- }
-
- wmb();
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
- status = VXGE_HW_ERR_SWAPPER_CTRL;
-
- return status;
-}
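
The swapper setup above works by reading a register whose reset value is a known 64-bit constant and classifying how the value arrived (clean, byte-swapped, bit-flipped, or both), then programming the matching unscramble mode. A toy classifier for two of those cases; the constant and names are invented, and __builtin_bswap64 is a GCC/clang builtin:

#include <stdint.h>
#include <stdio.h>

#define MAGIC 0x0123456789abcdefULL

enum swap_mode { SWAP_NONE, SWAP_BYTES, SWAP_UNKNOWN };

static enum swap_mode classify(uint64_t observed)
{
        if (observed == MAGIC)
                return SWAP_NONE;       /* link already sane */
        if (observed == __builtin_bswap64(MAGIC))
                return SWAP_BYTES;      /* enable byte swapping */
        return SWAP_UNKNOWN;            /* e.g. bit-flipped variants */
}

int main(void)
{
        printf("%d\n", classify(__builtin_bswap64(MAGIC)) == SWAP_BYTES);
        return 0;
}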
-
-/*
- * __vxge_hw_device_toc_get
- * This routine sets the swapper and reads the toc pointer and returns the
- * memory mapped address of the toc
- */
-static struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0)
-{
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc = NULL;
- enum vxge_hw_status status;
-
- struct vxge_hw_legacy_reg __iomem *legacy_reg =
- (struct vxge_hw_legacy_reg __iomem *)bar0;
-
- status = __vxge_hw_legacy_swapper_set(legacy_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&legacy_reg->toc_first_pointer);
- toc = bar0 + val64;
-exit:
- return toc;
-}
-
-/*
- * __vxge_hw_device_reg_addr_get
- * This routine sets the swapper and reads the toc pointer and initializes the
- * register location pointers in the device object. It waits until the ric
- * has completed initializing registers.
- */
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- hldev->legacy_reg = hldev->bar0;
-
- hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
- if (hldev->toc_reg == NULL) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- val64 = readq(&hldev->toc_reg->toc_common_pointer);
- hldev->common_reg = hldev->bar0 + val64;
-
- val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
- hldev->mrpcim_reg = hldev->bar0 + val64;
-
- for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
- hldev->srpcim_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
- hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
- hldev->vpath_reg[i] = hldev->bar0 + val64;
- }
-
- val64 = readq(&hldev->toc_reg->toc_kdfc);
-
- switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
- case 0:
- hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
- break;
- default:
- break;
- }
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
- * This routine returns the Access Rights of the driver
- */
-static u32
-__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
-{
- u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
-
- switch (host_type) {
- case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- if (func_id == 0) {
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- }
- break;
- case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
- case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
- case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
- break;
- case VXGE_HW_SR_VH_FUNCTION0:
- case VXGE_HW_VH_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- }
-
- return access_rights;
-}
-/*
- * __vxge_hw_device_is_privilaged
- * This routine checks if the device function is privileged or not
- */
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
-{
- if (__vxge_hw_device_access_rights_get(host_type,
- func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
- return VXGE_HW_OK;
- else
- return VXGE_HW_ERR_PRIVILEGED_OPERATION;
-}
-
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
- u64 val64;
-
- val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
- return
- (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_device_host_info_get
- * This routine returns the host type assignments
- */
-static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
-
- val64 = readq(&hldev->common_reg->host_type_assignments);
-
- hldev->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- hldev->func_id =
- __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
-
- hldev->access_rights = __vxge_hw_device_access_rights_get(
- hldev->host_type, hldev->func_id);
-
- hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
- hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
-
- hldev->first_vp_id = i;
- break;
- }
-}
-
-/*
- * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
- * link width and signalling rate.
- */
-static enum vxge_hw_status
-__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- /* Get the negotiated link width and speed from PCI config space */
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
-
- if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
- return VXGE_HW_ERR_INVALID_PCI_INFO;
-
- switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
- case PCIE_LNK_WIDTH_RESRV:
- case PCIE_LNK_X1:
- case PCIE_LNK_X2:
- case PCIE_LNK_X4:
- case PCIE_LNK_X8:
- break;
- default:
- return VXGE_HW_ERR_INVALID_PCI_INFO;
- }
-
- return VXGE_HW_OK;
-}
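
The check above reads PCI_EXP_LNKSTA and rejects anything other than a 2.5 GT/s link (speed code 1) at x1/x2/x4/x8. Decoding those two fields is just masking and shifting; a small sketch with a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define LNKSTA_CLS 0x000f /* current link speed code */
#define LNKSTA_NLW 0x03f0 /* negotiated link width */

int main(void)
{
        uint16_t lnk = 0x0041; /* e.g. an x4 link at speed code 1 */
        unsigned int speed = lnk & LNKSTA_CLS;
        unsigned int width = (lnk & LNKSTA_NLW) >> 4;
        int valid = speed == 1 && (width == 1 || width == 2 ||
                                   width == 4 || width == 8);

        printf("speed code %u, x%u -> %s\n", speed, width,
               valid ? "valid" : "invalid");
        return valid ? 0 : 1;
}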
-
-/*
- * __vxge_hw_device_initialize
- * Initialize Titan-V hardware.
- */
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id)) {
- /* Validate the pci-e link width and speed */
- status = __vxge_hw_verify_pci_e_info(hldev);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
- struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
- struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
- struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- fw_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
- fw_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
- fw_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
-
- snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- fw_date->month, fw_date->day, fw_date->year);
-
- fw_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- fw_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- fw_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- fw_version->major, fw_version->minor, fw_version->build);
-
- flash_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
- flash_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
- flash_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
-
- snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- flash_date->month, flash_date->day, flash_date->year);
-
- flash_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
- flash_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
- flash_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
-
- snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- flash_version->major, flash_version->minor,
- flash_version->build);
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- __be64 *serial_number = (void *)hw_info->serial_number;
- __be64 *product_desc = (void *)hw_info->product_desc;
- __be64 *part_number = (void *)hw_info->part_number;
- enum vxge_hw_status status;
- u64 data0, data1 = 0, steer_ctrl = 0;
- u32 i, j = 0;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- serial_number[0] = cpu_to_be64(data0);
- serial_number[1] = cpu_to_be64(data1);
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- part_number[0] = cpu_to_be64(data0);
- part_number[1] = cpu_to_be64(data1);
-
- for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
- i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
- data0 = i;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- product_desc[j++] = cpu_to_be64(data0);
- product_desc[j++] = cpu_to_be64(data1);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- data0 = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_FUNC_MODE,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
- return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- * from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
- u8 *macaddr, u8 *macaddr_mask)
-{
- u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
- int i;
-
- do {
- status = vxge_hw_vpath_fw_api(vpath, action,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
- data1);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i - 1] = (u8) (data0 & 0xFF);
- data0 >>= 8;
-
- macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
- data1 >>= 8;
- }
-
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
- data0 = 0, data1 = 0, steer_ctrl = 0;
-
- } while (!is_valid_ether_addr(macaddr));
-exit:
- return status;
-}
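
The unpacking loop above receives the station address in the low 48 bits of a u64, last octet in the least significant byte, so it peels bytes off back to front. Isolated, the transform looks like this (unpack_mac() is an invented name):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void unpack_mac(uint64_t data, uint8_t mac[ETH_ALEN])
{
        int i;

        for (i = ETH_ALEN; i > 0; i--) {
                mac[i - 1] = (uint8_t)(data & 0xFF);
                data >>= 8;
        }
}

int main(void)
{
        uint8_t mac[ETH_ALEN];

        unpack_mac(0x001122334455ULL, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}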
-
-/**
- * vxge_hw_device_hw_info_get - Get the hw information
- * @bar0: the bar
- * @hw_info: the hw_info struct
- *
- * Returns the vpath mask that has the bits set for each vpath allocated
- * for the driver, FW version information, and the first mac address for
- * each vpath
- */
-enum vxge_hw_status
-vxge_hw_device_hw_info_get(void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u32 i;
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- enum vxge_hw_status status;
- struct __vxge_hw_virtualpath vpath;
-
- memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
-
- toc = __vxge_hw_device_toc_get(bar0);
- if (toc == NULL) {
- status = VXGE_HW_ERR_CRITICAL;
- goto exit;
- }
-
- val64 = readq(&toc->toc_common_pointer);
- common_reg = bar0 + val64;
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&common_reg->vpath_rst_in_prog);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
-
- val64 = readq(&common_reg->host_type_assignments);
-
- hw_info->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpmgmt_pointer[i]);
-
- vpmgmt_reg = bar0 + val64;
-
- hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
- if (__vxge_hw_device_access_rights_get(hw_info->host_type,
- hw_info->func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
-
- val64 = readq(&toc->toc_mrpcim_pointer);
-
- mrpcim_reg = bar0 + val64;
-
- writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
- wmb();
- }
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
-
- spin_lock_init(&vpath.lock);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- break;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_addr_get(&vpath,
- hw_info->mac_addrs[i],
- hw_info->mac_addr_masks[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
- */
-static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
-{
- struct __vxge_hw_device *hldev;
- struct list_head *p, *n;
-
- if (!blockpool)
- return;
-
- hldev = blockpool->hldev;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
- dma_unmap_single(&hldev->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- blockpool->pool_size--;
- }
-
- list_for_each_safe(p, n, &blockpool->free_entry_list) {
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- }
-
- return;
-}
-
-/*
- * __vxge_hw_blockpool_create - Create block pool
- */
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max)
-{
- u32 i;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (blockpool == NULL) {
- status = VXGE_HW_FAIL;
- goto blockpool_create_exit;
- }
-
- blockpool->hldev = hldev;
- blockpool->block_size = VXGE_HW_BLOCK_SIZE;
- blockpool->pool_size = 0;
- blockpool->pool_max = pool_max;
- blockpool->req_out = 0;
-
- INIT_LIST_HEAD(&blockpool->free_block_list);
- INIT_LIST_HEAD(&blockpool->free_entry_list);
-
- for (i = 0; i < pool_size + pool_max; i++) {
- entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- list_add(&entry->item, &blockpool->free_entry_list);
- }
-
- for (i = 0; i < pool_size; i++) {
- memblock = vxge_os_dma_malloc(
- hldev->pdev,
- VXGE_HW_BLOCK_SIZE,
- &dma_handle,
- &acc_handle);
- if (memblock == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
- VXGE_HW_BLOCK_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
- vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry =
- kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry != NULL) {
- list_del(&entry->item);
- entry->length = VXGE_HW_BLOCK_SIZE;
- entry->memblock = memblock;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- } else {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- }
-
-blockpool_create_exit:
- return status;
-}
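
The constructor above preallocates every block up front and, if any allocation or DMA mapping fails part-way, destroys the half-built pool instead of returning it. The create/destroy pairing in miniature, with plain malloc() standing in for the DMA-mapped blocks:

#include <stdio.h>
#include <stdlib.h>

struct pool {
        void **blocks;
        int count;
};

static void pool_destroy(struct pool *p)
{
        while (p->count > 0)
                free(p->blocks[--p->count]);
        free(p->blocks);
        p->blocks = NULL;
}

static int pool_create(struct pool *p, int n, size_t block_size)
{
        int i;

        p->count = 0;
        p->blocks = calloc(n, sizeof(*p->blocks));
        if (!p->blocks)
                return -1;

        for (i = 0; i < n; i++) {
                p->blocks[i] = malloc(block_size);
                if (!p->blocks[i]) {
                        pool_destroy(p); /* unwind the partial work */
                        return -1;
                }
                p->count++;
        }
        return 0;
}

int main(void)
{
        struct pool p;

        if (pool_create(&p, 16, 4096))
                return 1;
        printf("pool of %d blocks ready\n", p.count);
        pool_destroy(&p);
        return 0;
}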
-
-/*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
-{
- if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
- (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
-{
- enum vxge_hw_status status;
-
- if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
- (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
- return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
-
- status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
- if (status != VXGE_HW_OK)
- return status;
-
- if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
- ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
- (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
- return VXGE_HW_BADCFG_VPATH_MTU;
-
- if ((vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
- return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
-{
- u32 i;
- enum vxge_hw_status status;
-
- if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
- return VXGE_HW_BADCFG_INTR_MODE;
-
- if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
- (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
- return VXGE_HW_BADCFG_RTS_MAC_EN;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- status = __vxge_hw_device_vpath_config_check(
- &new_config->vp_config[i]);
- if (status != VXGE_HW_OK)
- return status;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_device_initialize - Initialize Titan device.
- * Initialize Titan device. Note that all the arguments of this public API
- * are 'IN', including @hldev. Driver cooperates with
- * OS to find new Titan device, locate its PCI and memory spaces.
- *
- * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
- * to enable the latter to perform Titan hardware initialization.
- */
-enum vxge_hw_status
-vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config)
-{
- u32 i;
- u32 nblocks = 0;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_config_check(device_config);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hldev = vzalloc(sizeof(struct __vxge_hw_device));
- if (hldev == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- hldev->magic = VXGE_HW_DEVICE_MAGIC;
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
-
- /* apply config */
- memcpy(&hldev->config, device_config,
- sizeof(struct vxge_hw_device_config));
-
- hldev->bar0 = attr->bar0;
- hldev->pdev = attr->pdev;
-
- hldev->uld_callbacks = attr->uld_callbacks;
-
- __vxge_hw_device_pci_e_init(hldev);
-
- status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK) {
- vfree(hldev);
- goto exit;
- }
-
- __vxge_hw_device_host_info_get(hldev);
-
- /* Incrementing for stats blocks */
- nblocks++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- if (device_config->vp_config[i].ring.enable ==
- VXGE_HW_RING_ENABLE)
- nblocks += device_config->vp_config[i].ring.ring_blocks;
-
- if (device_config->vp_config[i].fifo.enable ==
- VXGE_HW_FIFO_ENABLE)
- nblocks += device_config->vp_config[i].fifo.fifo_blocks;
- nblocks++;
- }
-
- if (__vxge_hw_blockpool_create(hldev,
- &hldev->block_pool,
- device_config->dma_blockpool_initial + nblocks,
- device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
-
- vxge_hw_device_terminate(hldev);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_device_initialize(hldev);
- if (status != VXGE_HW_OK) {
- vxge_hw_device_terminate(hldev);
- goto exit;
- }
-
- *devh = hldev;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_terminate - Terminate Titan device.
- * Terminate HW device.
- */
-void
-vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
-{
- vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
-
- hldev->magic = VXGE_HW_DEVICE_DEAD;
- __vxge_hw_blockpool_destroy(&hldev->block_pool);
- vfree(hldev);
-}
-
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- * and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_stats_access_exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->xmac_stats_access_cmd,
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
- vpath->hldev->config.device_poll_millis);
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&vp_reg->xmac_stats_access_data);
- else
- *stat = 0;
-
-vpath_stats_access_exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)vpath_tx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset, val64);
- if (status != VXGE_HW_OK)
- goto exit;
- offset++;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
- val64 = (u64 *) vpath_rx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset >> 3, val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- val64 = readq(&vp_reg->vpath_debug_stats0);
- hw_stats->ini_num_mwr_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats1);
- hw_stats->ini_num_mrd_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats2);
- hw_stats->ini_num_cpl_rcvd =
- (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats3);
- hw_stats->ini_num_mwr_byte_sent =
- VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats4);
- hw_stats->ini_num_cpl_byte_rcvd =
- VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats5);
- hw_stats->wrcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats6);
- hw_stats->rdcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count0 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count1 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count2 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count3 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count4);
- hw_stats->vpath_genstats_count4 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count5);
- hw_stats->vpath_genstats_count5 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
- val64);
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
-
- hw_stats->prog_event_vnum0 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
-
- hw_stats->prog_event_vnum1 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
-
- hw_stats->prog_event_vnum2 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
-
- hw_stats->prog_event_vnum3 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
-
- val64 = readq(&vp_reg->rx_multi_cast_stats);
- hw_stats->rx_multi_cast_frame_discard =
- (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
-
- val64 = readq(&vp_reg->rx_frm_transferred);
- hw_stats->rx_frm_transferred =
- (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
-
- val64 = readq(&vp_reg->rxd_returned);
- hw_stats->rxd_returned =
- (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_mpa);
- hw_stats->rx_mpa_len_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
- hw_stats->rx_mpa_mrk_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
- hw_stats->rx_mpa_crc_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_fau);
- hw_stats->rx_permitted_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
- hw_stats->rx_vp_reset_discarded_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
- hw_stats->rx_wol_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
-
- val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
- hw_stats->tx_vp_reset_discarded_frms =
- (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
- val64);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_stats_get - Get the device hw statistics.
- * Returns the vpath h/w stats for the device.
- */
-enum vxge_hw_status
-vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_hw_info *hw_stats)
-{
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
- (hldev->virtual_paths[i].vp_open ==
- VXGE_HW_VP_NOT_OPEN))
- continue;
-
- memcpy(hldev->virtual_paths[i].hw_stats_sav,
- hldev->virtual_paths[i].hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(
- &hldev->virtual_paths[i],
- hldev->virtual_paths[i].hw_stats);
- }
-
- memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_hw_info));
-
- return status;
-}
-
-/*
- * vxge_hw_driver_stats_get - Get the device sw statistics.
- * Returns the vpath s/w stats for the device.
- */
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_sw_info *sw_stats)
-{
- memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_sw_info));
-
- return VXGE_HW_OK;
-}
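-
-/*
- * Usage sketch (hypothetical caller, not part of this file): the two
- * getters above can be paired to snapshot both views of the device
- * statistics:
- *
- *	struct vxge_hw_device_stats_hw_info hw;
- *	struct vxge_hw_device_stats_sw_info sw;
- *
- *	if (vxge_hw_device_stats_get(hldev, &hw) == VXGE_HW_OK &&
- *	    vxge_hw_driver_stats_get(hldev, &sw) == VXGE_HW_OK)
- *		consume_stats(&hw, &sw);  (consume_stats() is assumed)
- */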
-
-/*
- * vxge_hw_mrpcim_stats_access - Perform an operation on the statistics
- * at the given location and offset; for a read, the result is returned
- * in *stat.
- */
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
- u32 operation, u32 location, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
- VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &hldev->mrpcim_reg->xmac_stats_sys_cmd,
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
- hldev->config.device_poll_millis);
-
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
- else
- *stat = 0;
-exit:
- return status;
-}
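-
-/*
- * The command word above packs four fields: the operation, a STROBE bit
- * that the hardware is expected to clear on completion (which is what
- * __vxge_hw_pio_mem_write64() polls on), the location selector, and the
- * qword offset within that location.  A sketch of a hypothetical read of
- * qword 2 of the aggregate-statistics block:
- *
- *	u64 stat;
- *	status = vxge_hw_mrpcim_stats_access(hldev, VXGE_HW_STATS_OP_READ,
- *					     VXGE_HW_STATS_LOC_AGGR, 2, &stat);
- */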
-
-/*
- * vxge_hw_device_xmac_aggr_stats_get - Get the statistics of an aggregate port
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)aggr_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (104 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_port_stats_get - Get the statistics of a port
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = 0x0;
- val64 = (u64 *) port_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (608 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_stats_get - Get the XMAC statistics
- */
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_xmac_stats *xmac_stats)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 i;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 0, &xmac_stats->aggr_stats[0]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 1, &xmac_stats->aggr_stats[1]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- status = vxge_hw_device_xmac_port_stats_get(hldev,
- i, &xmac_stats->port_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_tx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_rx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_debug_set - Set the debug module, level and timestamp
- * This routine is used to dynamically change the debug output
- */
-void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
- enum vxge_debug_level level, u32 mask)
-{
- if (hldev == NULL)
- return;
-
-#if defined(VXGE_DEBUG_TRACE_MASK) || \
- defined(VXGE_DEBUG_ERR_MASK)
- hldev->debug_module_mask = mask;
- hldev->debug_level = level;
-#endif
-
-#if defined(VXGE_DEBUG_ERR_MASK)
- hldev->level_err = level & VXGE_ERR;
-#endif
-
-#if defined(VXGE_DEBUG_TRACE_MASK)
- hldev->level_trace = level & VXGE_TRACE;
-#endif
-}
-
-/*
- * vxge_hw_device_error_level_get - Get the error level
- * This routine returns the current error level set
- */
-u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return VXGE_ERR;
- else
- return hldev->level_err;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_trace_level_get - Get the trace level
- * This routine returns the current trace level set
- */
-u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK)
- if (hldev == NULL)
- return VXGE_TRACE;
- else
- return hldev->level_trace;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_getpause_data - Pause frame generation and reception.
- * Returns the Pause frame generation and reception capability of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 *tx, u32 *rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- goto exit;
- }
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
- *tx = 1;
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
- *rx = 1;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_setpause_data - set/reset pause frame generation.
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 tx, u32 rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (tx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- if (rx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
-
- writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
-exit:
- return status;
-}
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
- return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
-}
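-
-/*
- * The mask-and-shift above extracts the negotiated link width field of
- * the PCIe Link Status register.  Assuming VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH
- * matches the standard PCI_EXP_LNKSTA_NLW mask, an equivalent sketch
- * using the generic PCI definitions would be:
- *
- *	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
- *	width = (lnk & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
- */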
-
-/*
- * __vxge_hw_ring_block_memblock_idx - Return the memblock index
- * This function returns the index of memory block
- */
-static inline u32
-__vxge_hw_ring_block_memblock_idx(u8 *block)
-{
- return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
-}
-
-/*
- * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
- * This function sets index to a memory block
- */
-static inline void
-__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
-{
- *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
-}
-
-/*
- * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
- * in RxD block
- * Sets the next block pointer in RxD block
- */
-static inline void
-__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
-{
- *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
-}
-
-/*
- * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
- * first block
- * Returns the dma address of the first RxD block
- */
-static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
-{
- struct vxge_hw_mempool_dma *dma_object;
-
- dma_object = ring->mempool->memblocks_dma_arr;
- vxge_assert(dma_object != NULL);
-
- return dma_object->addr;
-}
-
-/*
- * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
- * This function returns the dma address of a given item
- */
-static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
- void *item)
-{
- u32 memblock_idx;
- void *memblock;
- struct vxge_hw_mempool_dma *memblock_dma_object;
- ptrdiff_t dma_item_offset;
-
- /* get owner memblock index */
- memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
-
- /* get owner memblock by memblock index */
- memblock = mempoolh->memblocks_arr[memblock_idx];
-
- /* get memblock DMA object by memblock index */
- memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
-
- /* calculate offset in the memblock of this item */
- dma_item_offset = (u8 *)item - (u8 *)memblock;
-
- return memblock_dma_object->addr + dma_item_offset;
-}
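-
-/*
- * Worked example with hypothetical numbers: if an item sits 512 bytes
- * into memblock 3, then dma_item_offset == 512 and the returned address
- * is memblocks_dma_arr[3].addr + 512.  The DMA view of a memblock is
- * assumed to mirror its CPU view at identical offsets.
- */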
-
-/*
- * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
- * This function links the "from" RxD block to the "to" RxD block
- */
-static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
- struct __vxge_hw_ring *ring, u32 from,
- u32 to)
-{
- u8 *to_item, *from_item;
- dma_addr_t to_dma;
-
- /* get "from" RxD block */
- from_item = mempoolh->items_arr[from];
- vxge_assert(from_item);
-
- /* get "to" RxD block */
- to_item = mempoolh->items_arr[to];
- vxge_assert(to_item);
-
- /* return address of the beginning of previous RxD block */
- to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
-
- /* set next pointer for this RxD block to point on
- * previous item's DMA start address */
- __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
-}
-
-/*
- * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
- * block callback
- * This function is the callback passed to __vxge_hw_mempool_create to
- * populate the memory pool of RxD blocks
- */
-static void
-__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 i;
- void *item = mempoolh->items_arr[index];
- struct __vxge_hw_ring *ring =
- (struct __vxge_hw_ring *)mempoolh->userdata;
-
- /* format rxds array */
- for (i = 0; i < ring->rxds_per_block; i++) {
- void *rxdblock_priv;
- void *uld_priv;
- struct vxge_hw_ring_rxd_1 *rxdp;
-
- u32 reserve_index = ring->channel.reserve_ptr -
- (index * ring->rxds_per_block + i + 1);
- u32 memblock_item_idx;
-
- ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
- i * ring->rxd_size;
-
- /* Note: memblock_item_idx is index of the item within
- * the memblock. For instance, in case of three RxD-blocks
- * per memblock this value can be 0, 1 or 2. */
- rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
- memblock_index, item,
- &memblock_item_idx);
-
- rxdp = ring->channel.reserve_arr[reserve_index];
-
- uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
-
- /* pre-format Host_Control */
- rxdp->host_control = (u64)(size_t)uld_priv;
- }
-
- __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
-
- if (is_last) {
- /* link last one with first one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
- }
-
- if (index > 0) {
- /* link this RxD block with previous one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
- }
-}
-
-/*
- * vxge_hw_ring_replenish - Initial replenish of RxDs
- * This function replenishes the RxDs from reserve array to work array
- */
-static enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
-{
- void *rxd;
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &ring->channel;
-
- while (vxge_hw_channel_dtr_count(channel) > 0) {
-
- status = vxge_hw_ring_rxd_reserve(ring, &rxd);
-
- vxge_assert(status == VXGE_HW_OK);
-
- if (ring->rxd_init) {
- status = ring->rxd_init(rxd, channel->userdata);
- if (status != VXGE_HW_OK) {
- vxge_hw_ring_rxd_free(ring, rxd);
- goto exit;
- }
- }
-
- vxge_hw_ring_rxd_post(ring, rxd);
- }
- status = VXGE_HW_OK;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates required memory for the channel and various arrays
- * in the channel
- */
-static struct __vxge_hw_channel *
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type,
- u32 length, u32 per_dtr_space,
- void *userdata)
-{
- struct __vxge_hw_channel *channel;
- struct __vxge_hw_device *hldev;
- int size = 0;
- u32 vp_id;
-
- hldev = vph->vpath->hldev;
- vp_id = vph->vpath->vp_id;
-
- switch (type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- size = sizeof(struct __vxge_hw_fifo);
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- size = sizeof(struct __vxge_hw_ring);
- break;
- default:
- break;
- }
-
- channel = kzalloc(size, GFP_KERNEL);
- if (channel == NULL)
- goto exit0;
- INIT_LIST_HEAD(&channel->item);
-
- channel->common_reg = hldev->common_reg;
- channel->first_vp_id = hldev->first_vp_id;
- channel->type = type;
- channel->devh = hldev;
- channel->vph = vph;
- channel->userdata = userdata;
- channel->per_dtr_space = per_dtr_space;
- channel->length = length;
- channel->vp_id = vp_id;
-
- channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->work_arr == NULL)
- goto exit1;
-
- channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->free_arr == NULL)
- goto exit1;
- channel->free_ptr = length;
-
- channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->reserve_arr == NULL)
- goto exit1;
- channel->reserve_ptr = length;
- channel->reserve_top = 0;
-
- channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->orig_arr == NULL)
- goto exit1;
-
- return channel;
-exit1:
- __vxge_hw_channel_free(channel);
-
-exit0:
- return NULL;
-}
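-
-/*
- * A note on the four arrays (an interpretation, not stated in the code):
- * reserve_arr appears to hold descriptors still available for reservation
- * (reserve_ptr starts at the full length), work_arr those handed to the
- * hardware, free_arr those completed and awaiting recycling, and orig_arr
- * the original ordering used when the channel is reset.
- */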
-
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to block pool
- */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
-{
- struct __vxge_hw_blockpool *blockpool;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- dma_addr_t dma_addr;
-
- blockpool = &devh->block_pool;
-
- if (block_addr == NULL) {
- blockpool->req_out--;
- goto exit;
- }
-
- dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
- DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
- vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
- blockpool->req_out--;
- goto exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry) {
- entry->length = length;
- entry->memblock = block_addr;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_h;
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- blockpool->req_out--;
-
-exit:
- return;
-}
-
-static inline void
-vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
-{
- void *vaddr;
-
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
- u32 nreq = 0, i;
-
- if ((blockpool->pool_size + blockpool->req_out) <
- VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
- nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
- blockpool->req_out += nreq;
- }
-
- for (i = 0; i < nreq; i++)
- vxge_os_dma_malloc_async(
- (blockpool->hldev)->pdev,
- blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
-
-/*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
- * Allocates a block of memory of given size, either from block pool
- * or by calling vxge_os_dma_malloc()
- */
-static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- void *memblock = NULL;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
-
- memblock = vxge_os_dma_malloc(devh->pdev, size,
- &dma_object->handle,
- &dma_object->acc_handle);
-
- if (!memblock)
- goto exit;
-
- dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
- size, DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
- vxge_os_dma_free(devh->pdev, memblock,
- &dma_object->acc_handle);
- memblock = NULL;
- goto exit;
- }
-
- } else {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- dma_object->addr = entry->dma_addr;
- dma_object->handle = entry->dma_handle;
- dma_object->acc_handle = entry->acc_handle;
- memblock = entry->memblock;
-
- list_add(&entry->item,
- &blockpool->free_entry_list);
- blockpool->pool_size--;
- }
-
- if (memblock != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
- }
-exit:
- return memblock;
-}
-
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static void
-__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- if (blockpool->pool_size < blockpool->pool_max)
- break;
-
- dma_unmap_single(&(blockpool->hldev)->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(
- (blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
-
- list_add(p, &blockpool->free_entry_list);
-
- blockpool->pool_size--;
-
- }
-}
-
-/*
- * __vxge_hw_blockpool_free - Frees the memory allocated with
- * __vxge_hw_blockpool_malloc
- */
-static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
- void *memblock, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
- dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
- DMA_BIDIRECTIONAL);
- vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
- } else {
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(
- struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = size;
- entry->memblock = memblock;
- entry->dma_addr = dma_object->addr;
- entry->acc_handle = dma_object->acc_handle;
- entry->dma_handle = dma_object->handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- if (status == VXGE_HW_OK)
- __vxge_hw_blockpool_blocks_remove(blockpool);
- }
-}
-
-/*
- * __vxge_hw_mempool_destroy
- */
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
-{
- u32 i, j;
- struct __vxge_hw_device *devh = mempool->devh;
-
- for (i = 0; i < mempool->memblocks_allocated; i++) {
- struct vxge_hw_mempool_dma *dma_object;
-
- vxge_assert(mempool->memblocks_arr[i]);
- vxge_assert(mempool->memblocks_dma_arr + i);
-
- dma_object = mempool->memblocks_dma_arr + i;
-
- for (j = 0; j < mempool->items_per_memblock; j++) {
- u32 index = i * mempool->items_per_memblock + j;
-
- /* skip the last partially filled (if any) memblock */
- if (index >= mempool->items_current)
- break;
- }
-
- vfree(mempool->memblocks_priv_arr[i]);
-
- __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
- mempool->memblock_size, dma_object);
- }
-
- vfree(mempool->items_arr);
- vfree(mempool->memblocks_dma_arr);
- vfree(mempool->memblocks_priv_arr);
- vfree(mempool->memblocks_arr);
- vfree(mempool);
-}
-
-/*
- * __vxge_hw_mempool_grow
- * Grows the mempool by up to %num_allocate memblocks.
- */
-static enum vxge_hw_status
-__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
- u32 *num_allocated)
-{
- u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
- u32 n_items = mempool->items_per_memblock;
- u32 start_block_idx = mempool->memblocks_allocated;
- u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- *num_allocated = 0;
-
- if (end_block_idx > mempool->memblocks_max) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- for (i = start_block_idx; i < end_block_idx; i++) {
- u32 j;
- u32 is_last = ((end_block_idx - 1) == i);
- struct vxge_hw_mempool_dma *dma_object =
- mempool->memblocks_dma_arr + i;
- void *the_memblock;
-
- /* allocate memblock's private part. Each DMA memblock
- * has a space allocated for item's private usage upon
- * mempool's user request. Each time the mempool grows, it
- * allocates a new memblock and its private part at once,
- * which helps to minimize memory usage. */
- mempool->memblocks_priv_arr[i] =
- vzalloc(array_size(mempool->items_priv_size, n_items));
- if (mempool->memblocks_priv_arr[i] == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- /* allocate DMA-capable memblock */
- mempool->memblocks_arr[i] =
- __vxge_hw_blockpool_malloc(mempool->devh,
- mempool->memblock_size, dma_object);
- if (mempool->memblocks_arr[i] == NULL) {
- vfree(mempool->memblocks_priv_arr[i]);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- (*num_allocated)++;
- mempool->memblocks_allocated++;
-
- memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
-
- the_memblock = mempool->memblocks_arr[i];
-
- /* fill the items hash array */
- for (j = 0; j < n_items; j++) {
- u32 index = i * n_items + j;
-
- if (first_time && index >= mempool->items_initial)
- break;
-
- mempool->items_arr[index] =
- ((char *)the_memblock + j*mempool->item_size);
-
- /* let caller to do more job on each item */
- if (mempool->item_func_alloc != NULL)
- mempool->item_func_alloc(mempool, i,
- dma_object, index, is_last);
-
- mempool->items_current = index + 1;
- }
-
- if (first_time && mempool->items_current ==
- mempool->items_initial)
- break;
- }
-exit:
- return status;
-}
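-
-/*
- * Worked example: with memblock_size == item_size == VXGE_HW_BLOCK_SIZE
- * (the way __vxge_hw_ring_create() below sizes its pool),
- * items_per_memblock is 1, so growing by num_allocate adds exactly
- * num_allocate RxD blocks, each paired with an items_priv_size-byte
- * private area.
- */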
-
-/*
- * vxge_hw_mempool_create
- * This function creates a memory pool object. The pool may grow but will
- * never shrink. It consists of a number of dynamically allocated blocks,
- * together large enough to hold %items_initial items. Memory is
- * DMA-able, but the client must map/unmap it before interoperating with
- * the device.
- */
-static struct vxge_hw_mempool *
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 items_priv_size,
- u32 items_initial,
- u32 items_max,
- const struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 memblocks_to_allocate;
- struct vxge_hw_mempool *mempool = NULL;
- u32 allocated;
-
- if (memblock_size < item_size) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- mempool = vzalloc(sizeof(struct vxge_hw_mempool));
- if (mempool == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- mempool->devh = devh;
- mempool->memblock_size = memblock_size;
- mempool->items_max = items_max;
- mempool->items_initial = items_initial;
- mempool->item_size = item_size;
- mempool->items_priv_size = items_priv_size;
- mempool->item_func_alloc = mp_callback->item_func_alloc;
- mempool->userdata = userdata;
-
- mempool->memblocks_allocated = 0;
-
- mempool->items_per_memblock = memblock_size / item_size;
-
- mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* allocate array of memblocks */
- mempool->memblocks_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of private parts of items per memblocks */
- mempool->memblocks_priv_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_priv_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of memblocks DMA objects */
- mempool->memblocks_dma_arr =
- vzalloc(array_size(sizeof(struct vxge_hw_mempool_dma),
- mempool->memblocks_max));
- if (mempool->memblocks_dma_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate hash array of items */
- mempool->items_arr = vzalloc(array_size(sizeof(void *),
- mempool->items_max));
- if (mempool->items_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* calculate initial number of memblocks */
- memblocks_to_allocate = (mempool->items_initial +
- mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* pre-allocate the mempool */
- status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
- &allocated);
- if (status != VXGE_HW_OK) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
-exit:
- return mempool;
-}
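-
-/*
- * For a concrete caller see __vxge_hw_ring_create() below, which builds
- * its RxD-block pool with memblock_size == item_size == VXGE_HW_BLOCK_SIZE
- * and items_initial == items_max == config->ring_blocks, so that pool is
- * fully pre-allocated and never grows after creation.
- */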
-
-/*
- * __vxge_hw_ring_abort - Abort the ring
- * This function terminates all outstanding RxDs of the ring
- */
-static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
-{
- void *rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(channel, &rxdh);
-
- if (rxdh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(channel);
-
- if (ring->rxd_term)
- ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
- channel->userdata);
-
- vxge_hw_channel_dtr_free(channel, rxdh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_reset - Resets the ring
- * This function resets the ring during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- __vxge_hw_ring_abort(ring);
-
- status = __vxge_hw_channel_reset(channel);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_ring_delete - Removes the ring
- * This function frees up the memory pool and removes the ring
- */
-static enum vxge_hw_status
-__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_ring *ring = vp->vpath->ringh;
-
- __vxge_hw_ring_abort(ring);
-
- if (ring->mempool)
- __vxge_hw_mempool_destroy(ring->mempool);
-
- vp->vpath->ringh = NULL;
- __vxge_hw_channel_free(&ring->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates Ring and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_ring_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_ring *ring;
- u32 ring_length;
- struct vxge_hw_ring_config *config;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
- static const struct vxge_hw_mempool_cbs ring_mp_callback = {
- .item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
- };
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- hldev = vp->vpath->hldev;
- vp_id = vp->vpath->vp_id;
-
- config = &hldev->config.vp_config[vp_id].ring;
-
- ring_length = config->ring_blocks *
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_RING,
- ring_length,
- attr->per_rxd_space,
- attr->userdata);
- if (ring == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vp->vpath->ringh = ring;
- ring->vp_id = vp_id;
- ring->vp_reg = vp->vpath->vp_reg;
- ring->common_reg = hldev->common_reg;
- ring->stats = &vp->vpath->sw_stats->ring_stats;
- ring->config = config;
- ring->callback = attr->callback;
- ring->rxd_init = attr->rxd_init;
- ring->rxd_term = attr->rxd_term;
- ring->buffer_mode = config->buffer_mode;
- ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
- ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
- ring->rxds_limit = config->rxds_limit;
-
- ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
- ring->rxd_priv_size =
- sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
- ring->per_rxd_space = attr->per_rxd_space;
-
- ring->rxd_priv_size =
- ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- /* how many RxDs can fit into one block. Depends on configured
- * buffer_mode. */
- ring->rxds_per_block =
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- /* calculate actual RxD block private size */
- ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring->mempool = __vxge_hw_mempool_create(hldev,
- VXGE_HW_BLOCK_SIZE,
- VXGE_HW_BLOCK_SIZE,
- ring->rxdblock_priv_size,
- ring->config->ring_blocks,
- ring->config->ring_blocks,
- &ring_mp_callback,
- ring);
- if (ring->mempool == NULL) {
- __vxge_hw_ring_delete(vp);
- return VXGE_HW_ERR_OUT_OF_MEMORY;
- }
-
- status = __vxge_hw_channel_initialize(&ring->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
-
- /* Note:
- * Specifying rxd_init callback means two things:
- * 1) rxds need to be initialized by driver at channel-open time;
- * 2) rxds need to be posted at channel-open time
- * (that's what the initial_replenish() below does)
- * Currently we don't have a case when the 1) is done without the 2).
- */
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
- }
-
- /* initial replenish will increment the counter in its post() routine,
- * we have to reset it */
- ring->stats->common_stats.usage_cnt = 0;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_config_default_get - Initialize device config with defaults.
- * Initialize Titan device config with default values.
- */
-enum vxge_hw_status
-vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
-{
- u32 i;
-
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
- device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
- device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
- device_config->rth_en = VXGE_HW_RTH_DEFAULT;
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
- device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
- device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
-
- device_config->vp_config[i].min_bandwidth =
- VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
-
- device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- VXGE_HW_MIN_FIFO_BLOCKS;
-
- device_config->vp_config[i].fifo.max_frags =
- VXGE_HW_MAX_FIFO_FRAGS;
-
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
-
- device_config->vp_config[i].fifo.alignment_size =
- VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
-
- device_config->vp_config[i].fifo.no_snoop_bits =
- VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].tti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].rti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].mtu =
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
- }
-
- return VXGE_HW_OK;
-}
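-
-/*
- * Usage sketch (hypothetical caller): the defaults are meant to be
- * fetched first and then selectively overridden before the device is
- * initialized:
- *
- *	struct vxge_hw_device_config cfg;
- *
- *	vxge_hw_device_config_default_get(&cfg);
- *	cfg.intr_mode = VXGE_HW_INTR_MODE_MSIX;	(assumed enum value)
- */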
-
-/*
- * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
- * Set the swapper bits appropriately for the vpath.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
-#ifndef __BIG_ENDIAN
- u64 val64;
-
- val64 = readq(&vpath_reg->vpath_general_cfg1);
- wmb();
- val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
- writeq(val64, &vpath_reg->vpath_general_cfg1);
- wmb();
-#endif
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
- * Set the swapper bits appropriately for the kdfc.
- */
-static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
- u64 val64;
-
- val64 = readq(&legacy_reg->pifm_wr_swap_en);
-
- if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
- val64 = readq(&vpath_reg->kdfcctl_cfg0);
- wmb();
-
- val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
-
- writeq(val64, &vpath_reg->kdfcctl_cfg0);
- wmb();
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mgmt_reg_read - Read Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 *value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->srpcim_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-
-exit:
- return status;
-}
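-
-/*
- * Usage sketch: reading the first qword of the common register space
- * (index is unused for this type):
- *
- *	u64 val;
- *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
- *				       0, 0, &val);
- */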
-
-/*
- * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
- */
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
-{
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- int i = 0, j = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- vpmgmt_reg = hldev->vpmgmt_reg[i];
- for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
- if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
- & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
- return VXGE_HW_FAIL;
- }
- }
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mgmt_reg_write - Write Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
- offset);
-
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_fifo_abort - Abort the fifo
- * This function terminates all outstanding TxDs of the fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
- void *txdlh;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
- if (txdlh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(&fifo->channel);
-
- if (fifo->txdl_term) {
- fifo->txdl_term(txdlh,
- VXGE_HW_TXDL_STATE_POSTED,
- fifo->channel.userdata);
- }
-
- vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_fifo_abort(fifo);
- status = __vxge_hw_channel_reset(&fifo->channel);
-
- return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function frees up the memory pool and removes the FIFO
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
- __vxge_hw_fifo_abort(fifo);
-
- if (fifo->mempool)
- __vxge_hw_mempool_destroy(fifo->mempool);
-
- vp->vpath->fifoh = NULL;
-
- __vxge_hw_channel_free(&fifo->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
- * list callback
- * This function is the callback passed to __vxge_hw_mempool_create to
- * populate the memory pool of TxD lists
- */
-static void
-__vxge_hw_fifo_mempool_item_alloc(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 memblock_item_idx;
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
- struct __vxge_hw_fifo *fifo =
- (struct __vxge_hw_fifo *)mempoolh->userdata;
- void *memblock = mempoolh->memblocks_arr[memblock_index];
-
- vxge_assert(txdp);
-
- txdp->host_control = (u64) (size_t)
- __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
- &memblock_item_idx);
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- vxge_assert(txdl_priv);
-
- fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
-
- /* pre-format HW's TxDL's private */
- txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
- txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
- txdl_priv->dma_handle = dma_object->handle;
- txdl_priv->memblock = memblock;
- txdl_priv->first_txdp = txdp;
- txdl_priv->next_txdl_priv = NULL;
- txdl_priv->alloc_frags = 0;
-}
-
-/*
- * __vxge_hw_fifo_create - Create a FIFO
- * This function creates FIFO and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_fifo_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_fifo *fifo;
- struct vxge_hw_fifo_config *config;
- u32 txdl_size, txdl_per_memblock;
- struct vxge_hw_mempool_cbs fifo_mp_callback;
- struct __vxge_hw_virtualpath *vpath;
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
- config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
-
- txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
-
- txdl_per_memblock = config->memblock_size / txdl_size;
-
- fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_FIFO,
- config->fifo_blocks * txdl_per_memblock,
- attr->per_txdl_space, attr->userdata);
-
- if (fifo == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vpath->fifoh = fifo;
- fifo->nofl_db = vpath->nofl_db;
-
- fifo->vp_id = vpath->vp_id;
- fifo->vp_reg = vpath->vp_reg;
- fifo->stats = &vpath->sw_stats->fifo_stats;
-
- fifo->config = config;
-
- /* apply "interrupts per txdl" attribute */
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
- fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
- fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
-
- if (fifo->config->intr)
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
-
- fifo->no_snoop_bits = config->no_snoop_bits;
-
- /*
- * FIFO memory management strategy:
- *
- * TxDL split into three independent parts:
- * - set of TxD's
- * - TxD HW private part
- * - driver private part
- *
- * Adaptive memory allocation is used, i.e. memory is allocated on
- * demand in chunks sized to fit into one memory block.
- * One memory block may contain more than one TxDL.
- *
- * During "reserve" operations more memory can be allocated on demand
- * for example due to FIFO full condition.
- *
- * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
- * routine which will essentially stop the channel and free resources.
- */
-
- /* TxDL common private size == TxDL private + driver private */
- fifo->priv_size =
- sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
- fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- fifo->per_txdl_space = attr->per_txdl_space;
-
- /* store the TxDL size and TxDLs per memblock computed above */
- fifo->txdl_size = txdl_size;
- fifo->txdl_per_memblock = txdl_per_memblock;
-
- fifo->txdl_term = attr->txdl_term;
- fifo->callback = attr->callback;
-
- if (fifo->txdl_per_memblock == 0) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
- goto exit;
- }
-
- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
- fifo->mempool =
- __vxge_hw_mempool_create(vpath->hldev,
- fifo->config->memblock_size,
- fifo->txdl_size,
- fifo->priv_size,
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- &fifo_mp_callback,
- fifo);
-
- if (fifo->mempool == NULL) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_channel_initialize(&fifo->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_fifo_delete(vp);
- goto exit;
- }
-
- vxge_assert(fifo->channel.reserve_ptr);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_read - Read the content of given address
- * in pci config space.
- * Read from the vpath pci config space.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0, u32 offset, u32 *val)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
-
- if (phy_func_0)
- val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
-
- writeq(val64, &vp_reg->pci_config_access_cfg1);
- wmb();
- writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
- &vp_reg->pci_config_access_cfg2);
- wmb();
-
- status = __vxge_hw_device_register_poll(
- &vp_reg->pci_config_access_cfg2,
- VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->pci_config_access_status);
-
- if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
- status = VXGE_HW_FAIL;
- *val = 0;
- } else
- *val = (u32)vxge_bVALn(val64, 32, 32);
-exit:
- return status;
-}
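-
-/*
- * Usage sketch: offset 0 of PCI config space holds the vendor/device ID
- * pair, so a caller could fetch it for physical function 0 with:
- *
- *	u32 ids;
- *	status = __vxge_hw_vpath_pci_read(vpath, 1, 0, &ids);
- */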
-
-/**
- * vxge_hw_device_flick_link_led - Flick (blink) link LED.
- * @hldev: HW device.
- * @on_off: TRUE to turn flickering on, FALSE to turn it off
- *
- * Flicker the link LED.
- */
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
-{
- struct __vxge_hw_virtualpath *vpath;
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (hldev == NULL) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- data0 = on_off;
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
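-
-/*
- * This is the kind of primitive an ethtool .set_phys_id handler would
- * drive (an assumption about the caller, not something this file
- * enforces): on_off == 1 issues the LED-control memo to start blinking,
- * on_off == 0 to stop it.
- */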
-
-/*
- * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
- u32 action, u32 rts_table, u32 offset,
- u64 *data0, u64 *data1)
-{
- enum vxge_hw_status status;
- u64 steer_ctrl = 0;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- if ((rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
- steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
- }
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- data0, data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
- (rts_table !=
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- *data1 = 0;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
- u32 rts_table, u32 offset, u64 steer_data0,
- u64 steer_data1)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- data0 = steer_data0;
-
- if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- data1 = steer_data1;
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vp,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size)
-{
- u64 data0, data1;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, &data0, &data1);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
-
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
-
- if (hash_type->hash_type_tcpipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
-
- if (hash_type->hash_type_ipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
-
- if (hash_type->hash_type_tcpipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
-
- if (hash_type->hash_type_ipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
-
- if (hash_type->hash_type_tcpipv6ex_en)
- data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
-
- if (hash_type->hash_type_ipv6ex_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
-
- if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
- data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
- else
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, data0, 0);
-exit:
- return status;
-}
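
A hedged caller sketch, not from the original file: enable RTH for TCP/IPv4
flows only. The algorithm argument stands in for whichever enum
vxge_hw_rth_algoritms value the caller selects (the enum body is not shown
in this hunk), and bucket_size is assumed to be the log2 of the
indirection-table size, matching itable_size below.

static enum vxge_hw_status
example_enable_rth(struct __vxge_hw_vpath_handle *vp,
		   enum vxge_hw_rth_algoritms alg)
{
	struct vxge_hw_rth_hash_types hash_type = { 0 };

	hash_type.hash_type_tcpipv4_en = 1;	/* hash TCP/IPv4 flows only */

	/* 8 -> 2^8 = 256 buckets (assumption; see itable_size below) */
	return vxge_hw_vpath_rts_rth_set(vp, alg, &hash_type, 8);
}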
-
-static void
-vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
- u16 flag, u8 *itable)
-{
- switch (flag) {
- case 1:
- *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 2:
- *data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 3:
- *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 4:
- *data1 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- return;
- default:
- return;
- }
-}
-
-/*
- * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size)
-{
- u32 i, j, action, rts_table;
- u64 data0;
- u64 data1;
- u32 max_entries;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- max_entries = (((u32)1) << itable_size);
-
- if (vp->vpath->hldev->config.rth_it_type
- == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
- action, rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[mtable[itable[j]]], action,
- rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- } else {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
- for (i = 0; i < vpath_count; i++) {
-
- for (j = 0; j < max_entries;) {
-
- data0 = 0;
- data1 = 0;
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 1, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 2, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 3, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 4, itable);
- j++;
- break;
- }
-
- if (data0 != 0) {
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[i],
- action, rts_table,
- 0, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- }
- }
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_check_leak - Check for memory leak
- * @ring: Handle to the ring object used for receive
- *
- * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
- * PRC_CFG6_VPn.RXD_SPAT, then a leak has occurred.
- * Returns: VXGE_HW_FAIL if a leak has occurred.
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 rxd_new_count, rxd_spat;
-
- if (ring == NULL)
- return status;
-
- rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
- rxd_spat = readq(&ring->vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
-
- if (rxd_new_count >= rxd_spat)
- status = VXGE_HW_FAIL;
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mgmt_read
- * This routine reads the vpath_mgmt registers
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mgmt_read(
- struct __vxge_hw_device *hldev,
- struct __vxge_hw_virtualpath *vpath)
-{
- u32 i, mtu = 0, max_pyld = 0;
- u64 val64;
-
- for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- val64 = readq(&vpath->vpmgmt_reg->
- rxmac_cfg0_port_vpmgmt_clone[i]);
- max_pyld =
- (u32)
- VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
- (val64);
- if (mtu < max_pyld)
- mtu = max_pyld;
- }
-
- vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
-
- val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (val64 & vxge_mBIT(i))
- vpath->vsport_number = i;
- }
-
- val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
-
- if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
- else
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
- * This routine checks the vpath_rst_in_prog register to see if
- * the adapter has completed the reset process for the vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
-{
- enum vxge_hw_status status;
-
- status = __vxge_hw_device_register_poll(
- &vpath->hldev->common_reg->vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
- 1 << (16 - vpath->vp_id)),
- vpath->hldev->config.device_poll_millis);
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_reset
- * This routine resets the vpath on the device
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg0);
-
- return VXGE_HW_OK;
-}
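
A worked note on the shift above (editorial reading, not original
commentary): vpath 0 occupies the most-significant end of the 17-bit reset
field, so vp_id = 3 yields 1 << 13. Only the upper 32 bits of the 64-bit
value are written, via __vxge_hw_pio_mem_write32_upper(); the same
1 << (16 - vp_id) encoding is what __vxge_hw_vpath_reset_check() polls for
above.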
-
-/*
- * __vxge_hw_vpath_sw_reset
- * This routine resets the vpath structures
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->ringh) {
- status = __vxge_hw_ring_reset(vpath->ringh);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- if (vpath->fifoh)
- status = __vxge_hw_fifo_reset(vpath->fifoh);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_prc_configure
- * This routine configures the prc registers of virtual path using the config
- * passed
- */
-static void
-__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
- return;
-
- val64 = readq(&vp_reg->prc_cfg1);
- val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
- writeq(val64, &vp_reg->prc_cfg1);
-
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
- writeq(val64, &vpath->vp_reg->prc_cfg6);
-
- val64 = readq(&vp_reg->prc_cfg7);
-
- if (vpath->vp_config->ring.scatter_mode !=
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
-
- val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
-
- switch (vpath->vp_config->ring.scatter_mode) {
- case VXGE_HW_RING_SCATTER_MODE_A:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
- break;
- case VXGE_HW_RING_SCATTER_MODE_B:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
- break;
- case VXGE_HW_RING_SCATTER_MODE_C:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
- break;
- }
- }
-
- writeq(val64, &vp_reg->prc_cfg7);
-
- writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
- __vxge_hw_ring_first_block_address_get(
- vpath->ringh) >> 3), &vp_reg->prc_cfg5);
-
- val64 = readq(&vp_reg->prc_cfg4);
- val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
- val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
-
- val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
- VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
-
- if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
- val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
- else
- val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
-
- writeq(val64, &vp_reg->prc_cfg4);
-}
-
-/*
- * __vxge_hw_vpath_kdfc_configure
- * This routine configures the kdfc registers of virtual path using the
- * config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u64 vpath_stride;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
-
- vpath->max_kdfc_db =
- (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
- val64+1)/2;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- vpath->max_nofl_db = vpath->max_kdfc_db;
-
- if (vpath->max_nofl_db <
- ((vpath->vp_config->fifo.memblock_size /
- (vpath->vp_config->fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd))) *
- vpath->vp_config->fifo.fifo_blocks)) {
-
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
- }
- val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
- (vpath->max_nofl_db*2)-1);
- }
-
- writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
-
- writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
- &vp_reg->kdfc_fifo_trpl_ctrl);
-
- val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
-
- val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
-
- val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
-#ifndef __BIG_ENDIAN
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
-#endif
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
-
- writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
- writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
- wmb();
- vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
-
- vpath->nofl_db =
- (struct __vxge_hw_non_offload_db_wrapper __iomem *)
- (hldev->kdfc + (vp_id *
- VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
- vpath_stride)));
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mac_configure
- * This routine configures the mac of virtual path using the config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
- vpath->vsport_number), &vp_reg->xmac_vsport_choice);
-
- if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->xmac_rpa_vcfg);
-
- if (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
- if (vp_config->rpa_strip_vlan_tag)
- val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- else
- val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- }
-
- writeq(val64, &vp_reg->xmac_rpa_vcfg);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-
- if (vp_config->mtu !=
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- if ((vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE);
- else
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vpath->max_mtu);
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg0);
-
- val64 = readq(&vp_reg->rxmac_vcfg1);
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
-
- if (hldev->config.rth_it_type ==
- VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
- val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
- 0x2) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg1);
- }
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_tim_configure
- * This routine configures the tim registers of virtual path using the config
- * passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- writeq(0, &vp_reg->tim_dest_addr);
- writeq(0, &vp_reg->tim_vpath_map);
- writeq(0, &vp_reg->tim_bitmap);
- writeq(0, &vp_reg->tim_remap);
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE)
- writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
- (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
-
- val64 = readq(&vp_reg->tim_pci_cfg);
- val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
- writeq(val64, &vp_reg->tim_pci_cfg);
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->tti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->tti.urange_a);
- }
-
- if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->tti.urange_b);
- }
-
- if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->tti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->tti.uec_a);
- }
-
- if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->tti.uec_b);
- }
-
- if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->tti.uec_c);
- }
-
- if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->tti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->tti.rtimer_val);
- }
-
- if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->tti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg3_saved = val64;
- }
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->rti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->rti.urange_a);
- }
-
- if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->rti.urange_b);
- }
-
- if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->rti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->rti.uec_a);
- }
-
- if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->rti.uec_b);
- }
-
- if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->rti.uec_c);
- }
-
- if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->rti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->rti.rtimer_val);
- }
-
- if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->rti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg3_saved = val64;
- }
-
- val64 = 0;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
-
- val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
- writeq(val64, &vp_reg->tim_wrkld_clc);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_initialize
- * This routine is the final phase of init which initializes the
- * registers of the vpath using the configuration passed.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u32 val32;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
-
- /* Get MRRS value from device control */
- status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
- if (status == VXGE_HW_OK) {
- val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
- val64 &=
- ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
- }
-
- val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
- VXGE_HW_MAX_PAYLOAD_SIZE_512);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
- writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and frees up memory
- */
-static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
- goto exit;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
- vpath->hldev->tim_int_mask1, vpath->vp_id);
- hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
- /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
- * work after the interface is brought down.
- */
- spin_lock(&vpath->lock);
- vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
- spin_unlock(&vpath->lock);
-
- vpath->vpmgmt_reg = NULL;
- vpath->nofl_db = NULL;
- vpath->max_mtu = 0;
- vpath->vsport_number = 0;
- vpath->max_kdfc_db = 0;
- vpath->max_nofl_db = 0;
- vpath->ringh = NULL;
- vpath->fifoh = NULL;
- memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
- vpath->stats_block = NULL;
- vpath->hw_stats = NULL;
- vpath->hw_stats_sav = NULL;
- vpath->sw_stats = NULL;
-
-exit:
- return;
-}
-
-/*
- * __vxge_hw_vp_initialize - Initialize Virtual Path structure
- * This routine is the initial phase of init which resets the vpath and
- * initializes the software support structures.
- */
-static enum vxge_hw_status
-__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
- struct vxge_hw_vp_config *config)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[vp_id];
-
- spin_lock_init(&vpath->lock);
- vpath->vp_id = vp_id;
- vpath->vp_open = VXGE_HW_VP_OPEN;
- vpath->hldev = hldev;
- vpath->vp_config = config;
- vpath->vp_reg = hldev->vpath_reg[vp_id];
- vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
-
- __vxge_hw_vpath_reset(hldev, vp_id);
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- INIT_LIST_HEAD(&vpath->vpath_handles);
-
- vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
- hldev->tim_int_mask1, vp_id);
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- __vxge_hw_vp_terminate(hldev, vp_id);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_mtu_set - Set MTU.
- * Set a new MTU value. For example, to use jumbo frames:
- * vxge_hw_vpath_mtu_set(my_device, 9600);
- */
-enum vxge_hw_status
-vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
-
- new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
-
-	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
-		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
-		goto exit;
-	}
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-
- vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
-
-exit:
- return status;
-}
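
A minimal caller sketch under the constraint enforced above: the requested
MTU plus VXGE_HW_MAC_HEADER_MAX_SIZE must fall within [VXGE_HW_MIN_MTU,
vpath->max_mtu]. The helper below is hypothetical.

/* Switch an open vpath handle "vp" to jumbo frames. */
static void example_set_jumbo(struct __vxge_hw_vpath_handle *vp)
{
	if (vxge_hw_vpath_mtu_set(vp, 9600) != VXGE_HW_OK)
		pr_warn("vxge: vpath rejected 9600-byte MTU\n");
}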
-
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
- * Enable the DMA vpath statistics. The function is called to re-enable
- * the adapter to update stats into host memory.
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- memcpy(vpath->hw_stats_sav, vpath->hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from the block pool and replenishes it
- */
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (size == blockpool->block_size) {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- blockpool->pool_size--;
- }
- }
-
- if (entry != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
-
- return entry;
-}
-
-/*
- * vxge_hw_vpath_open - Open a virtual path on a given adapter
- * This function is used to open access to a virtual path of an
- * adapter for offload and GRO operations. This function returns
- * synchronously.
- */
-enum vxge_hw_status
-vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct __vxge_hw_vpath_handle *vp;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[attr->vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_OPEN) {
- status = VXGE_HW_ERR_INVALID_STATE;
- goto vpath_open_exit1;
- }
-
- status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
- &hldev->config.vp_config[attr->vp_id]);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit1;
-
- vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
- if (vp == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit2;
- }
-
- vp->vpath = vpath;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
- status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit6;
- }
-
- if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
- status = __vxge_hw_ring_create(vp, &attr->ring_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit7;
-
- __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
- }
-
- vpath->fifoh->tx_intr_num =
- (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_TX;
-
- vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
- VXGE_HW_BLOCK_SIZE);
- if (vpath->stats_block == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit8;
- }
-
- vpath->hw_stats = vpath->stats_block->memblock;
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
- vpath->hw_stats;
-
- vpath->hw_stats_sav =
- &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit8;
-
- list_add(&vp->item, &vpath->vpath_handles);
-
- hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
-
- *vpath_handle = vp;
-
- attr->fifo_attr.userdata = vpath->fifoh;
- attr->ring_attr.userdata = vpath->ringh;
-
- return VXGE_HW_OK;
-
-vpath_open_exit8:
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-vpath_open_exit7:
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-vpath_open_exit6:
- vfree(vp);
-vpath_open_exit2:
- __vxge_hw_vp_terminate(hldev, attr->vp_id);
-vpath_open_exit1:
-
- return status;
-}
-
-/**
- * vxge_hw_vpath_rx_doorbell_init - Initialize the RxD doorbell of a vpath
- * @vp: Handle obtained from a previous vpath open
- *
- * This function posts the initial receive-descriptor count to the PRC
- * doorbell of the vpath and trims the ring's rxds_limit accordingly.
- */
-void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct __vxge_hw_ring *ring = vpath->ringh;
- struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
- u64 new_count, val64, val164;
-
- if (vdev->titan1) {
- new_count = readq(&vpath->vp_reg->rxdmem_size);
- new_count &= 0x1fff;
- } else
- new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
-
- val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
-
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
- &vpath->vp_reg->prc_rxd_doorbell);
- readl(&vpath->vp_reg->prc_rxd_doorbell);
-
- val164 /= 2;
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
- val64 &= 0x1ff;
-
- /*
- * Each RxD is of 4 qwords
- */
- new_count -= (val64 + 1);
- val64 = min(val164, new_count) / 4;
-
- ring->rxds_limit = min(ring->rxds_limit, val64);
- if (ring->rxds_limit < 4)
- ring->rxds_limit = 4;
-}
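
An interpretive note on the arithmetic above, not original commentary:
new_count is measured in qwords and each RxD occupies 4 qwords, hence the
final division by 4. Halving val164 and taking the min against the
remaining count keeps the software rxds_limit at or below the doorbell
credit just posted, while the floor of 4 RxDs guarantees the ring can
still make forward progress.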
-
-/*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: Hal device
- * @entry: Entry of block to be freed
- *
- * This function frees a block from block pool
- */
-static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
- struct __vxge_hw_blockpool_entry *entry)
-{
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (entry->length == blockpool->block_size) {
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- __vxge_hw_blockpool_blocks_remove(blockpool);
-}
-
-/*
- * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
- * This function is used to close access to virtual path opened
- * earlier.
- */
-enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- struct __vxge_hw_device *devh = NULL;
- u32 vp_id = vp->vpath->vp_id;
- u32 is_empty = TRUE;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- vpath = vp->vpath;
- devh = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_close_exit;
- }
-
- list_del(&vp->item);
-
- if (!list_empty(&vpath->vpath_handles)) {
- list_add(&vp->item, &vpath->vpath_handles);
- is_empty = FALSE;
- }
-
- if (!is_empty) {
- status = VXGE_HW_FAIL;
- goto vpath_close_exit;
- }
-
- devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
-
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-
- if (vpath->stats_block != NULL)
- __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
-
- vfree(vp);
-
- __vxge_hw_vp_terminate(devh, vp_id);
-
-vpath_close_exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_reset - Resets vpath
- * This function is used to request a reset of vpath
- */
-enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status;
- u32 vp_id;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
-
- vp_id = vpath->vp_id;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
- if (status == VXGE_HW_OK)
- vpath->sw_stats->soft_reset_cnt++;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
- * This function polls for vpath reset completion and re-initializes
- * the vpath.
- */
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- enum vxge_hw_status status;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
-
- vp_id = vp->vpath->vp_id;
- vpath = vp->vpath;
- hldev = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (vpath->ringh != NULL)
- __vxge_hw_vpath_prc_configure(hldev, vp_id);
-
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr,
- &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_enable - Enable vpath.
- * This routine clears the vpath reset thereby enabling a vpath
- * to start forwarding frames and generating interrupts.
- */
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_device *hldev;
- u64 val64;
-
- hldev = vp->vpath->hldev;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
- 1 << (16 - vp->vpath->vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg1);
-}
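
Taken together, the reset entry points above compose into the vpath
recovery path a driver would run after a serious error; a hedged sketch:

static enum vxge_hw_status
example_vpath_recover(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;

	status = vxge_hw_vpath_reset(vp);	/* request the reset */
	if (status != VXGE_HW_OK)
		return status;

	/* poll for completion, then re-initialize rings/fifo/registers */
	status = vxge_hw_vpath_recover_from_reset(vp);
	if (status != VXGE_HW_OK)
		return status;

	vxge_hw_vpath_enable(vp);	/* clear the reset; resume traffic */
	return VXGE_HW_OK;
}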
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
deleted file mode 100644
index 0cd0750484ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ /dev/null
@@ -1,2086 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_CONFIG_H
-#define VXGE_CONFIG_H
-#include <linux/hardirq.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-
-#ifndef VXGE_CACHE_LINE_SIZE
-#define VXGE_CACHE_LINE_SIZE 128
-#endif
-
-#ifndef VXGE_ALIGN
-#define VXGE_ALIGN(adrs, size) \
- (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
-#endif
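
Worked example: VXGE_ALIGN(adrs, size) evaluates to the number of padding
bytes needed to round adrs up to the next size-byte boundary (size must be
a power of two). With adrs = 0x1005 and size = 128: 0x1005 & 127 = 5, so
(128 - 5) & 127 = 123 bytes of padding; an already-aligned address yields 0.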
-
-#define VXGE_HW_MIN_MTU ETH_MIN_MTU
-#define VXGE_HW_MAX_MTU 9600
-#define VXGE_HW_DEFAULT_MTU 1500
-
-#define VXGE_HW_MAX_ROM_IMAGES 8
-
-struct eprom_image {
- u8 is_valid:1;
- u8 index;
- u8 type;
- u16 version;
-};
-
-#ifdef VXGE_DEBUG_ASSERT
-/**
- * vxge_assert
- * @test: C-condition to check
- * @fmt: printf like format string
- *
- * This function implements a traditional assert. By default assertions
- * are enabled. They can be disabled by undefining the VXGE_DEBUG_ASSERT
- * macro at compile time.
- */
-#define vxge_assert(test) BUG_ON(!(test))
-#else
-#define vxge_assert(test)
-#endif /* end of VXGE_DEBUG_ASSERT */
-
-/**
- * enum vxge_debug_level
- * @VXGE_NONE: debug disabled
- * @VXGE_ERR: all errors going to be logged out
- * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
- * going to be logged out. Very noisy.
- *
- * This enumeration is used to switch between debug levels at runtime
- * if the DEBUG macro is defined at compile time. If the DEBUG macro is
- * not defined, the code is compiled out.
- */
-enum vxge_debug_level {
- VXGE_NONE = 0,
- VXGE_TRACE = 1,
- VXGE_ERR = 2
-};
-
-#define NULL_VPID 0xFFFFFFFF
-#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
-#define VXGE_DEBUG_MODULE_MASK 0xffffffff
-#define VXGE_DEBUG_TRACE_MASK 0xffffffff
-#define VXGE_DEBUG_ERR_MASK 0xffffffff
-#define VXGE_DEBUG_MASK 0x000001ff
-#else
-#define VXGE_DEBUG_MODULE_MASK 0x20000000
-#define VXGE_DEBUG_TRACE_MASK 0x20000000
-#define VXGE_DEBUG_ERR_MASK 0x20000000
-#define VXGE_DEBUG_MASK 0x00000001
-#endif
-
-/*
- * @VXGE_COMPONENT_LL: do debug for vxge link layer module
- * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
- *
- * These masks are used to distinguish modules or libraries at compile
- * time and runtime. The Makefile must declare the
- * VXGE_DEBUG_MODULE_MASK macro and set it to the proper value.
- */
-#define VXGE_COMPONENT_LL 0x20000000
-#define VXGE_COMPONENT_ALL 0xffffffff
-
-#define VXGE_HW_BASE_INF 100
-#define VXGE_HW_BASE_ERR 200
-#define VXGE_HW_BASE_BADCFG 300
-
-enum vxge_hw_status {
- VXGE_HW_OK = 0,
- VXGE_HW_FAIL = 1,
- VXGE_HW_PENDING = 2,
- VXGE_HW_COMPLETIONS_REMAIN = 3,
-
- VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
- VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
-
- VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
- VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
- VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
- VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
- VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
- VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
- VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
- VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
- VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
- VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
- VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
- VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
- VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
- VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
- VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
- VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
- VXGE_HW_ERR_PRIVILEGED_OPERATION = VXGE_HW_BASE_ERR + 17,
- VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
- VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
- VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
- VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
- VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
-
- VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
- VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
- VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
- VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
- VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
- VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
- VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
-
- VXGE_HW_EOF_TRACE_BUF = -1
-};
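
Because the 100/200/300 bases partition the status space into
informational, error, and bad-configuration bands, a caller can classify a
code by a simple range check; an illustrative helper, not part of the
original header:

static inline bool vxge_status_is_badcfg(enum vxge_hw_status s)
{
	return s >= VXGE_HW_BASE_BADCFG && s <= VXGE_HW_BADCFG_RTS_MAC_EN;
}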
-
-/**
- * enum vxge_hw_device_link_state - Link state enumeration.
- * @VXGE_HW_LINK_NONE: Invalid link state.
- * @VXGE_HW_LINK_DOWN: Link is down.
- * @VXGE_HW_LINK_UP: Link is up.
- *
- */
-enum vxge_hw_device_link_state {
- VXGE_HW_LINK_NONE,
- VXGE_HW_LINK_DOWN,
- VXGE_HW_LINK_UP
-};
-
-/**
- * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
- * @VXGE_HW_FW_UPGRADE_OK: All OK; send the next 16 bytes
- * @VXGE_HW_FW_UPGRADE_DONE: upload completed
- * @VXGE_HW_FW_UPGRADE_ERR: upload error
- * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
- *
- */
-enum vxge_hw_fw_upgrade_code {
- VXGE_HW_FW_UPGRADE_OK = 0,
- VXGE_HW_FW_UPGRADE_DONE = 1,
- VXGE_HW_FW_UPGRADE_ERR = 2,
- VXGE_FW_UPGRADE_BYTES2SKIP = 3
-};
-
-/**
- * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
- * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (flash check failed)
- */
-enum vxge_hw_fw_upgrade_err_code {
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
- VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
- VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
- VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
-};
-
-/**
- * struct vxge_hw_device_date - Date Format
- * @day: Day
- * @month: Month
- * @year: Year
- * @date: Date in string format
- *
- * Structure for returning date
- */
-
-#define VXGE_HW_FW_STRLEN 32
-struct vxge_hw_device_date {
- u32 day;
- u32 month;
- u32 year;
- char date[VXGE_HW_FW_STRLEN];
-};
-
-struct vxge_hw_device_version {
- u32 major;
- u32 minor;
- u32 build;
- char version[VXGE_HW_FW_STRLEN];
-};
-
-/**
- * struct vxge_hw_fifo_config - Configuration of fifo.
- * @enable: Is this fifo to be commissioned
- * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
- * blocks per queue.
- * @max_frags: Max number of Tx buffers per TxDL (that is, per single
- * transmit operation).
- * No more than 256 transmit buffers can be specified.
- * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
- * bytes. Setting @memblock_size to page size ensures
- * by-page allocation of descriptors. 128K bytes is the
- * maximum supported block size.
- * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
- * (e.g., to align on a cache line).
- * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
- * Use 0 otherwise.
- * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
- * which generally improves latency of the host bridge operation
- * (see PCI specification). For valid values please refer
- * to struct vxge_hw_fifo_config{} in the driver sources.
- * Configuration of all Titan fifos.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_fifo_config{} structure.
- */
-struct vxge_hw_fifo_config {
- u32 enable;
-#define VXGE_HW_FIFO_ENABLE 1
-#define VXGE_HW_FIFO_DISABLE 0
-
- u32 fifo_blocks;
-#define VXGE_HW_MIN_FIFO_BLOCKS 2
-#define VXGE_HW_MAX_FIFO_BLOCKS 128
-
- u32 max_frags;
-#define VXGE_HW_MIN_FIFO_FRAGS 1
-#define VXGE_HW_MAX_FIFO_FRAGS 256
-
- u32 memblock_size;
-#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
-#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
-#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
-
- u32 alignment_size;
-#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
-#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
-#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
-
- u32 intr;
-#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
-#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
-#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
-
- u32 no_snoop_bits;
-#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
-#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
-#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
-#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
-#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
-
-};
-/**
- * struct vxge_hw_ring_config - Ring configurations.
- * @enable: Is this ring to be commissioned
- * @ring_blocks: Numbers of RxD blocks in the ring
- * @buffer_mode: Receive buffer mode (1, 3, or 5); for details please refer
- * to Titan User Guide.
- * @scatter_mode: Titan supports receive scatter modes A, B and C.
- * For details please refer to Titan User Guide.
- * @rx_timer_val: The number of 32ns periods that would be counted between two
- * timer interrupts.
- * @greedy_return: If set, forces the device to return absolutely all RxDs
- * that are consumed and still on board when a timer interrupt
- * triggers. If clear, and the device has already returned RxDs
- * between the previous and the current timer interrupt, the device
- * is not forced to return the remaining consumed on-board RxDs
- * that account for a byte count less than the one programmed into
- * the PRC_CFG6.RXD_CRXDT field.
- * @rx_timer_ci: TBD
- * @backoff_interval_us: Time (in microseconds), after which Titan
- * tries to download RxDs posted by the host.
- * Note that the "backoff" does not happen if the host posts receive
- * descriptors in a timely fashion.
- * Ring configuration.
- */
-struct vxge_hw_ring_config {
- u32 enable;
-#define VXGE_HW_RING_ENABLE 1
-#define VXGE_HW_RING_DISABLE 0
-#define VXGE_HW_RING_DEFAULT 1
-
- u32 ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS 1
-#define VXGE_HW_MAX_RING_BLOCKS 128
-#define VXGE_HW_DEF_RING_BLOCKS 2
-
- u32 buffer_mode;
-#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
-#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
-#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
-#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
-
- u32 scatter_mode;
-#define VXGE_HW_RING_SCATTER_MODE_A 0
-#define VXGE_HW_RING_SCATTER_MODE_B 1
-#define VXGE_HW_RING_SCATTER_MODE_C 2
-#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
-
- u64 rxds_limit;
-#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
-};
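
An illustrative static initialization using only the defaults defined
above (a sketch; the driver actually fills this in through struct
vxge_hw_device_config):

static const struct vxge_hw_ring_config example_ring_cfg = {
	.enable		= VXGE_HW_RING_DEFAULT,
	.ring_blocks	= VXGE_HW_DEF_RING_BLOCKS,
	.buffer_mode	= VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT,
	.scatter_mode	= VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT,
	.rxds_limit	= VXGE_HW_DEF_RING_RXDS_LIMIT,
};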
-
-/**
- * struct vxge_hw_vp_config - Configuration of virtual path
- * @vp_id: Virtual Path Id
- * @min_bandwidth: Minimum Guaranteed bandwidth
- * @ring: See struct vxge_hw_ring_config{}.
- * @fifo: See struct vxge_hw_fifo_config{}.
- * @tti: Configuration of interrupt associated with Transmit.
- * see struct vxge_hw_tim_intr_config();
- * @rti: Configuration of interrupt associated with Receive.
- * see struct vxge_hw_tim_intr_config();
- * @mtu: mtu size used on this port.
- * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
- * remove the VLAN tag from all received tagged frames that are not
- * replicated at the internal L2 switch.
- * 0 - Do not strip the VLAN tag.
- * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
- * always placed into the RxDMA descriptor.
- *
- * This structure is used by the driver to pass the configuration parameters to
- * configure Virtual Path.
- */
-struct vxge_hw_vp_config {
- u32 vp_id;
-
-#define VXGE_HW_VPATH_PRIORITY_MIN 0
-#define VXGE_HW_VPATH_PRIORITY_MAX 16
-#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
-
- u32 min_bandwidth;
-#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
-#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
-#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
-
- struct vxge_hw_ring_config ring;
- struct vxge_hw_fifo_config fifo;
- struct vxge_hw_tim_intr_config tti;
- struct vxge_hw_tim_intr_config rti;
-
- u32 mtu;
-#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
-#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
-#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
-
- u32 rpa_strip_vlan_tag;
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
-
-};
-/**
- * struct vxge_hw_device_config - Device configuration.
- * @dma_blockpool_initial: Initial size of DMA Pool
- * @dma_blockpool_max: Maximum blocks in DMA pool
- * @intr_mode: Line, or MSI-X interrupt.
- *
- * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table).
- * @rth_it_type: RTH IT table programming type
- * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
- * @vp_config: Configuration for virtual paths
- * @device_poll_millis: Specify the interval (in milliseconds)
- * to wait for register reads
- *
- * Titan configuration.
- * Contains per-device configuration parameters, including:
- * - stats sampling interval, etc.
- *
- * In addition, struct vxge_hw_device_config{} includes "subordinate"
- * configurations, including:
- * - fifos and rings;
- * - MAC (done at firmware level).
- *
- * See Titan User Guide for more details.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_device_config{} structure. Please refer to the
- * corresponding include file.
- * See also: struct vxge_hw_tim_intr_config{}.
- */
-struct vxge_hw_device_config {
- u32 device_poll_millis;
-#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
-#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
-#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
-
- u32 dma_blockpool_initial;
- u32 dma_blockpool_max;
-#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
-#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
-
-#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
-
- u32 intr_mode:2,
-#define VXGE_HW_INTR_MODE_IRQLINE 0
-#define VXGE_HW_INTR_MODE_MSIX 1
-#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
-
-#define VXGE_HW_INTR_MODE_DEF 0
-
- rth_en:1,
-#define VXGE_HW_RTH_DISABLE 0
-#define VXGE_HW_RTH_ENABLE 1
-#define VXGE_HW_RTH_DEFAULT 0
-
- rth_it_type:1,
-#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
-#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
-#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
-
- rts_mac_en:1,
-#define VXGE_HW_RTS_MAC_DISABLE 0
-#define VXGE_HW_RTS_MAC_ENABLE 1
-#define VXGE_HW_RTS_MAC_DEFAULT 0
-
- hwts_en:1;
-#define VXGE_HW_HWTS_DISABLE 0
-#define VXGE_HW_HWTS_ENABLE 1
-#define VXGE_HW_HWTS_DEFAULT 1
-
- struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * function vxge_uld_link_up_f - Link-Up callback provided by driver.
- * @devh: HW device handle.
- * Link-up notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_link_down_f - Link-Down callback provided by
- * driver.
- * @devh: HW device handle.
- *
- * Link-Down notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_crit_err_f - Critical Error notification callback.
- * @devh: HW device handle (typically obtained at HW device
- * initialization time).
- * @type: Enumerated hw error, e.g.: double ECC.
- * @serr_data: Titan status.
- * @ext_data: Extended data. The contents depends on the @type.
- *
- * Critical-error notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
- * @link_up: See vxge_uld_link_up_f{}.
- * @link_down: See vxge_uld_link_down_f{}.
- * @crit_err: See vxge_uld_crit_err_f{}.
- *
- * Driver slow-path (per-driver) callbacks.
- * Implemented by driver and provided to HW via
- * vxge_hw_driver_initialize().
- * Note that these callbacks are not mandatory: HW will not invoke
- * a callback if NULL is specified.
- *
- * See also: vxge_hw_driver_initialize().
- */
-struct vxge_hw_uld_cbs {
- void (*link_up)(struct __vxge_hw_device *devh);
- void (*link_down)(struct __vxge_hw_device *devh);
- void (*crit_err)(struct __vxge_hw_device *devh,
- enum vxge_hw_event type, u64 ext_data);
-};
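
A hedged wiring sketch: since HW skips NULL callbacks, a driver need only
fill in the hooks it cares about. vxge_hw_driver_initialize() is referenced
above but its signature is not part of this hunk, so only the structure
fill is shown; the names below are hypothetical.

static void example_link_up(struct __vxge_hw_device *devh)
{
	/* e.g. mark the carrier up on the associated netdev */
}

static const struct vxge_hw_uld_cbs example_uld_cbs = {
	.link_up	= example_link_up,
	.link_down	= NULL,		/* optional; NULL is skipped */
	.crit_err	= NULL,
};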
-
-/*
- * struct __vxge_hw_blockpool_entry - Block private data structure
- * @item: List header used to link.
- * @length: Length of the block
- * @memblock: Virtual address block
- * @dma_addr: DMA Address of the block.
- * @dma_handle: DMA handle of the block.
- * @acc_handle: DMA acc handle
- *
- * Block is allocated with a header to put the blocks into list.
- *
- */
-struct __vxge_hw_blockpool_entry {
- struct list_head item;
- u32 length;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * struct __vxge_hw_blockpool - Block Pool
- * @hldev: HW device
- * @block_size: size of each block.
- * @pool_size: Number of blocks in the pool
- * @pool_max: Maximum number of blocks above which to free additional blocks
- * @req_out: Number of block requests outstanding with the OS
- * @free_block_list: List of free blocks
- *
- * Block pool contains the DMA blocks preallocated.
- *
- */
-struct __vxge_hw_blockpool {
- struct __vxge_hw_device *hldev;
- u32 block_size;
- u32 pool_size;
- u32 pool_max;
- u32 req_out;
- struct list_head free_block_list;
- struct list_head free_entry_list;
-};
-
-/*
- * enum __vxge_hw_channel_type - Enumerated channel types.
- * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
- * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
- * @VXGE_HW_CHANNEL_TYPE_RING: ring.
- * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
- * (and recognized) channel types. Currently: 2.
- *
- * Enumerated channel types. Currently there are only two link-layer
- * channels - Titan fifo and Titan ring. In the future the list will grow.
- */
-enum __vxge_hw_channel_type {
- VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
- VXGE_HW_CHANNEL_TYPE_FIFO = 1,
- VXGE_HW_CHANNEL_TYPE_RING = 2,
- VXGE_HW_CHANNEL_TYPE_MAX = 3
-};
-
-/*
- * struct __vxge_hw_channel
- * @item: List item; used to maintain a list of open channels.
- * @type: Channel type. See enum vxge_hw_channel_type{}.
- * @devh: Device handle. HW device object that contains _this_ channel.
- * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
- * @length: Channel length. Currently allocated number of descriptors.
- * The channel length "grows" when more descriptors get allocated.
- * See _hw_mempool_grow.
- * @reserve_arr: Reserve array. Contains descriptors that can be reserved
- * by driver for the subsequent send or receive operation.
- * See vxge_hw_fifo_txdl_reserve(),
- * vxge_hw_ring_rxd_reserve().
- * @reserve_ptr: Current pointer in the reserve array
- * @reserve_top: Maximum number of descriptors available in the
- *              reserve array.
- * @work_arr: Work array. Contains descriptors posted to the channel.
- * Note that at any point in time @work_arr contains 3 types of
- * descriptors:
- * 1) posted but not yet consumed by Titan device;
- * 2) consumed but not yet completed;
- * 3) completed but not yet freed
- * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
- * @post_index: Post index. At any point in time points on the
- *              position in the channel, which will contain the next
- *              to-be-posted descriptor.
- * @compl_index: Completion index. At any point in time points on the
- *              position in the channel, which will contain the next
- *              to-be-completed descriptor.
- * @free_arr: Free array. Contains completed descriptors that were freed
- * (i.e., handed over back to HW) by driver.
- * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
- * @free_ptr: Current pointer in the free array
- * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
- * to store per-operation control information.
- * @stats: Pointer to common statistics
- * @userdata: Per-channel opaque (void*) user-defined context, which may be
- * driver object, ULP connection, etc.
- * Once channel is open, @userdata is passed back to user via
- * vxge_hw_channel_callback_f.
- *
- * HW channel object.
- *
- * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
- */
-struct __vxge_hw_channel {
- struct list_head item;
- enum __vxge_hw_channel_type type;
- struct __vxge_hw_device *devh;
- struct __vxge_hw_vpath_handle *vph;
- u32 length;
- u32 vp_id;
- void **reserve_arr;
- u32 reserve_ptr;
- u32 reserve_top;
- void **work_arr;
- u32 post_index ____cacheline_aligned;
- u32 compl_index ____cacheline_aligned;
- void **free_arr;
- u32 free_ptr;
- void **orig_arr;
- u32 per_dtr_space;
- void *userdata;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 first_vp_id;
- struct vxge_hw_vpath_stats_sw_common_info *stats;
-
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_virtualpath - Virtual Path
- *
- * @vp_id: Virtual path id
- * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
- * @hldev: Hal device
- * @vp_config: Virtual Path Config
- * @vp_reg: VPATH Register map address in BAR0
- * @vpmgmt_reg: VPATH_MGMT register map address
- * @max_mtu: Maximum MTU that can be supported
- * @vsport_number: vsport attached to this vpath
- * @max_kdfc_db: Maximum kernel mode doorbells
- * @max_nofl_db: Maximum non offload doorbells
- * @tx_intr_num: Interrupt Number associated with the TX
- *
- * @ringh: Ring Queue
- * @fifoh: FIFO Queue
- * @vpath_handles: Virtual Path handles list
- * @stats_block: Memory for DMAing stats
- * @stats: Vpath statistics
- *
- * Virtual path structure to encapsulate the data related to a virtual path.
- * Virtual paths are allocated by the HW upon getting configuration from the
- * driver and inserted into the list of virtual paths.
- */
-struct __vxge_hw_virtualpath {
- u32 vp_id;
-
- u32 vp_open;
-#define VXGE_HW_VP_NOT_OPEN 0
-#define VXGE_HW_VP_OPEN 1
-
- struct __vxge_hw_device *hldev;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
-
- u32 max_mtu;
- u32 vsport_number;
- u32 max_kdfc_db;
- u32 max_nofl_db;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- struct __vxge_hw_ring *____cacheline_aligned ringh;
- struct __vxge_hw_fifo *____cacheline_aligned fifoh;
- struct list_head vpath_handles;
- struct __vxge_hw_blockpool_entry *stats_block;
- struct vxge_hw_vpath_stats_hw_info *hw_stats;
- struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- spinlock_t lock;
-};
-
-/*
- * struct __vxge_hw_vpath_handle - List item to store callback information
- * @item: List head to keep the item in linked list
- * @vpath: Virtual path to which this item belongs
- *
- * This structure is used to store the callback information.
- */
-struct __vxge_hw_vpath_handle {
- struct list_head item;
- struct __vxge_hw_virtualpath *vpath;
-};
-
-/**
- * struct __vxge_hw_device - Hal device object
- * @magic: Magic Number
- * @bar0: BAR0 virtual address.
- * @pdev: Physical device handle
- * @config: Configuration passed by the LL driver at initialization
- * @link_state: Link state
- *
- * HW device object. Represents Titan adapter
- */
-struct __vxge_hw_device {
- u32 magic;
-#define VXGE_HW_DEVICE_MAGIC 0x12345678
-#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
- void __iomem *bar0;
- struct pci_dev *pdev;
- struct net_device *ndev;
- struct vxge_hw_device_config config;
- enum vxge_hw_device_link_state link_state;
-
- const struct vxge_hw_uld_cbs *uld_callbacks;
-
- u32 host_type;
- u32 func_id;
- u32 access_rights;
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
- struct vxge_hw_legacy_reg __iomem *legacy_reg;
- struct vxge_hw_toc_reg __iomem *toc_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
- [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
- [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
- struct vxge_hw_vpath_reg __iomem *vpath_reg \
- [VXGE_HW_TITAN_VPATH_REG_SPACES];
- u8 __iomem *kdfc;
- u8 __iomem *usdc;
- struct __vxge_hw_virtualpath virtual_paths \
- [VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpath_assignments;
- u64 vpaths_deployed;
- u32 first_vp_id;
- u64 tim_int_mask0[4];
- u32 tim_int_mask1[4];
-
- struct __vxge_hw_blockpool block_pool;
- struct vxge_hw_device_stats stats;
- u32 debug_module_mask;
- u32 debug_level;
- u32 level_err;
- u32 level_trace;
- u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
-};
-
-#define VXGE_HW_INFO_LEN 64
-/**
- * struct vxge_hw_device_hw_info - Device information
- * @host_type: Host Type
- * @func_id: Function Id
- * @vpath_mask: vpath bit mask
- * @fw_version: Firmware version
- * @fw_date: Firmware Date
- * @flash_version: Flash version
- * @flash_date: Flash date
- * @mac_addrs: Mac addresses for each vpath
- * @mac_addr_masks: Mac address masks for each vpath
- *
- * The @vpath_mask has a bit set for each vpath allocated to the driver,
- * and @mac_addrs holds the first MAC address for each vpath.
- */
-struct vxge_hw_device_hw_info {
- u32 host_type;
-#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
-#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
-#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
-#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
-#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
-#define VXGE_HW_SR_VH_FUNCTION0 5
-#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
-#define VXGE_HW_VH_NORMAL_FUNCTION 7
- u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
-#define VXGE_HW_FUNCTION_MODE_SRIOV 2
-#define VXGE_HW_FUNCTION_MODE_MRIOV 3
-#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
-#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
-#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
-#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
-
- u32 func_id;
- u64 vpath_mask;
- struct vxge_hw_device_version fw_version;
- struct vxge_hw_device_date fw_date;
- struct vxge_hw_device_version flash_version;
- struct vxge_hw_device_date flash_date;
- u8 serial_number[VXGE_HW_INFO_LEN];
- u8 part_number[VXGE_HW_INFO_LEN];
- u8 product_desc[VXGE_HW_INFO_LEN];
- u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
- u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
-};
-
-/**
- * struct vxge_hw_device_attr - Device memory spaces.
- * @bar0: BAR0 virtual address.
- * @pdev: PCI device object.
- *
- * Device memory spaces. Includes configuration, BAR0 etc. per device
- * mapped memories. Also, includes a pointer to OS-specific PCI device object.
- */
-struct vxge_hw_device_attr {
- void __iomem *bar0;
- struct pci_dev *pdev;
- const struct vxge_hw_uld_cbs *uld_callbacks;
-};
-
-#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
- m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0x80000000; \
- m1[1] = 0x40000000; \
- } \
-}
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
- m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0; \
- m1[1] = 0; \
- } \
-}
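-
-/*
- * Illustrative sketch (the example_* name and mask arrays are not part of
- * this API): enabling and later disabling the TIM interrupt for interrupt
- * number @int_num with the two helpers above.
- */
-static inline void example_tim_int_toggle(u64 m0[4], u32 m1[4], u32 int_num)
-{
-        VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, int_num);
-        /* ... interrupt in use ... */
-        VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, int_num);
-}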
-
-#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
- status = vxge_hw_mrpcim_stats_access(hldev, \
- VXGE_HW_STATS_OP_READ, \
- loc, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
-/*
- * struct __vxge_hw_ring - Ring channel.
- * @channel: Channel "base" of this ring, the common part of all HW
- * channels.
- * @mempool: Memory pool, the pool from which descriptors get allocated.
- * (See vxge_hw_mm.h).
- * @config: Ring configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @ring_length: Length of the ring
- * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
- * as per Titan User Guide.
- * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
- * 1-buffer mode descriptor is 32 byte long, etc.
- * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
- * per-descriptor data (e.g., DMA handle for Solaris)
- * @per_rxd_space: Per rxd space requested by driver
- * @rxds_per_block: Number of descriptors per hardware-defined RxD
- * block. Depends on the (1-, 3-, 5-) buffer mode.
- * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
- * usage. Not to be confused with @rxd_priv_size.
- * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
- * @callback: Channel completion callback. HW invokes the callback when there
- * are new completions on that channel. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Channel's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding channel.
- * See also vxge_hw_channel_rxd_term_f{}.
- * @stats: Statistics for ring
- *
- * Ring channel.
- *
- * Note: The structure is cache line aligned to better utilize
- * CPU cache performance.
- */
-struct __vxge_hw_ring {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 ring_length;
- u32 buffer_mode;
- u32 rxd_size;
- u32 rxd_priv_size;
- u32 per_rxd_space;
- u32 rxds_per_block;
- u32 rxdblock_priv_size;
- u32 cmpl_cnt;
- u32 vp_id;
- u32 doorbell_cnt;
- u32 total_db_cnt;
- u64 rxds_limit;
- u32 rtimer;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
- struct vxge_hw_ring_config *config;
-} ____cacheline_aligned;
-
-/**
- * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
- * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
- * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_txdl_state {
- VXGE_HW_TXDL_STATE_NONE = 0,
- VXGE_HW_TXDL_STATE_AVAIL = 1,
- VXGE_HW_TXDL_STATE_POSTED = 2,
- VXGE_HW_TXDL_STATE_FREED = 3
-};
-/*
- * struct __vxge_hw_fifo - Fifo.
- * @channel: Channel "base" of this fifo, the common part of all HW
- * channels.
- * @mempool: Memory pool, from which descriptors get allocated.
- * @config: Fifo configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @interrupt_type: Interrupt type to be used
- * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
- * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. For more
- * information on TxDLs please refer to the Titan UG.
- * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
- * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
- * @priv_size: Per-Tx descriptor space reserved for driver
- * usage.
- * @per_txdl_space: Per txdl private space for the driver
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @stats: Statistics of this fifo
- *
- * Fifo channel.
- * Note: The structure is cache line aligned.
- */
-struct __vxge_hw_fifo {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_fifo_config *config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
- u64 interrupt_type;
- u32 no_snoop_bits;
- u32 txdl_per_memblock;
- u32 txdl_size;
- u32 priv_size;
- u32 per_txdl_space;
- u32 vp_id;
- u32 tx_intr_num;
- u32 rtimer;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb,
- int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
- * Each memblock is a contiguous block of DMA-able memory.
- * @frags: Total number of fragments (that is, contiguous data buffers)
- * carried by this TxDL.
- * @align_vaddr_start: Aligned virtual address start
- * @align_vaddr: Virtual address of the per-TxDL area in memory used for
- * alignment. Used to place one or more mis-aligned fragments.
- * @align_dma_addr: DMA address translated from the @align_vaddr.
- * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
- * @align_dma_acch: DMA access handle that corresponds to @align_dma_addr.
- * @align_dma_offset: The current offset into the @align_vaddr area.
- * Grows while filling the descriptor, gets reset.
- * @align_used_frags: Number of fragments used.
- * @alloc_frags: Total number of fragments allocated.
- * @unused: TODO
- * @next_txdl_priv: (TODO).
- * @first_txdp: (TODO).
- * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
- * TxDL list.
- * @txdlh: Corresponding txdlh to this TxDL.
- * @memblock: Pointer to the TxDL memory block or memory page.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: 1 if the descriptor is reserved, 0 otherwise. Internal usage.
- *
- * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that driver can ask HW
- * to allocate additional per-descriptor space for its own (driver-specific)
- * purposes.
- *
- * See also: struct vxge_hw_ring_rxd_priv{}.
- */
-struct __vxge_hw_fifo_txdl_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
- u32 frags;
- u8 *align_vaddr_start;
- u8 *align_vaddr;
- dma_addr_t align_dma_addr;
- struct pci_dev *align_dma_handle;
- struct pci_dev *align_dma_acch;
- ptrdiff_t align_dma_offset;
- u32 align_used_frags;
- u32 alloc_frags;
- u32 unused;
- struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
- struct vxge_hw_fifo_txd *first_txdp;
- void *memblock;
-};
-
-/*
- * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
- * @control_0: Bits 0 to 7 - Doorbell type.
- * Bits 8 to 31 - Reserved.
- * Bits 32 to 39 - The highest TxD in this TxDL.
- * Bits 40 to 47 - Reserved.
- * Bits 48 to 55 - Reserved.
- * Bits 56 to 63 - No snoop flags.
- * @txdl_ptr: The starting location of the TxDL in host memory.
- *
- * Created by the host and written to the adapter via PIO to a Kernel Doorbell
- * FIFO. All non-offload doorbell wrapper fields must be written by the host as
- * part of a doorbell write. Consumed by the adapter but is not written by the
- * adapter.
- */
-struct __vxge_hw_non_offload_db_wrapper {
- u64 control_0;
-#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
-#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_NODBW_TYPE_NODBW 0
-
-#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
-#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
-
-#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
-
- u64 txdl_ptr;
-};
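-
-/*
- * Illustrative sketch (the example_* name is not part of this API):
- * composing @control_0 for a non-offload doorbell from the helper macros
- * above; the no-snoop flags shown are illustrative values.
- */
-static inline u64 example_nodbw_control(u32 last_txd)
-{
-        return VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
-               VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
-               VXGE_HW_NODBW_LIST_NO_SNOOP(
-                        VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE |
-                        VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);
-}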
-
-/*
- * TX Descriptor
- */
-
-/**
- * struct vxge_hw_fifo_txd - Transmit Descriptor
- * @control_0: Bits 0 to 6 - Reserved.
- * Bit 7 - List Ownership. This field should be initialized
- * to '1' by the driver before the transmit list pointer is
- * written to the adapter. This field will be set to '0' by the
- * adapter once it has completed transmitting the frame or frames in
- * the list. Note - This field is only valid in TxD0. Additionally,
- * for multi-list sequences, the driver should not release any
- * buffers until the ownership of the last list in the multi-list
- * sequence has been returned to the host.
- * Bits 8 to 11 - Reserved
- * Bits 12 to 15 - Transfer_Code. This field is only valid in
- * TxD0. It is used to describe the status of the transmit data
- * buffer transfer. This field is always overwritten by the
- * adapter, so this field may be initialized to any value.
- * Bits 16 to 17 - Host steering. This field allows the host to
- * override the selection of the physical transmit port.
- * Attention:
- * Normal sounds as if learned from the switch rather than from
- * the aggregation algorithms.
- * 00: Normal. Use Destination/MAC Address
- * lookup to determine the transmit port.
- * 01: Send on physical Port1.
- * 10: Send on physical Port0.
- * 11: Send on both ports.
- * Bits 18 to 21 - Reserved
- * Bits 22 to 23 - Gather_Code. This field is set by the host and
- * is used to describe how individual buffers comprise a frame.
- * 10: First descriptor of a frame.
- * 00: Middle of a multi-descriptor frame.
- * 01: Last descriptor of a frame.
- * 11: First and last descriptor of a frame (the entire frame
- * resides in a single buffer).
- * For multi-descriptor frames, the only valid gather code sequence
- * is {10, [00], 01}. In other words, the descriptors must be placed
- * in the list in the correct order.
- * Bits 24 to 27 - Reserved
- * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
- * definition. Only valid in TxD0. This field allows the host to
- * indicate the Ethernet encapsulation of an outbound LSO packet.
- * 00 - classic mode (best guess)
- * 01 - LLC
- * 10 - SNAP
- * 11 - DIX
- * If "classic mode" is selected, the adapter will attempt to
- * decode the frame's Ethernet encapsulation by examining the L/T
- * field as follows:
- * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
- * if packet is IPv4 or IPv6.
- * 0x8870 Jumbo-SNAP encoding.
- * 0x0800 IPv4 DIX encoding
- * 0x86DD IPv6 DIX encoding
- * others illegal encapsulation
- * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
- * Set to 1 to perform segmentation offload for TCP/UDP.
- * This field is valid only in TxD0.
- * Bits 31 to 33 - Reserved.
- * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
- * This field is meaningful only when LSO_Control is non-zero.
- * When LSO_Control is set to TCP_LSO, the single (possibly large)
- * TCP segment described by this TxDL will be sent as a series of
- * TCP segments each of which contains no more than LSO_MSS
- * payload bytes.
- * When LSO_Control is set to UDP_LSO, the single (possibly large)
- * UDP datagram described by this TxDL will be sent as a series of
- * UDP datagrams each of which contains no more than LSO_MSS
- * payload bytes.
- * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
- * or TCP payload, with the exception of the last, which will have
- * <= LSO_MSS bytes of payload.
- * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
- * buffer to be read by the adapter. This field is written by the
- * host. A value of 0 is illegal.
- * Bits 32 to 63 - This value is written by the adapter upon
- * completion of a UDP or TCP LSO operation and indicates the number
- * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
- * returned for any non-LSO operation.
- * @control_1: Bits 0 to 4 - Reserved.
- * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
- * offload. This field is only valid in the first TxD of a frame.
- * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that TCP is present.
- * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that UDP is present.
- * Bits 8 to 14 - Reserved.
- * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
- * instruct the adapter to insert the VLAN tag specified by the
- * Tx_VLAN_Tag field. This field is only valid in the first TxD of
- * a frame.
- * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
- * to be inserted into the frame by the adapter (the first two bytes
- * of a VLAN tag are always 0x8100). This field is only valid if the
- * Tx_VLAN_Enable field is set to '1'.
- * Bits 32 to 33 - Reserved.
- * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
- * number the frame is associated with. This field is written by the
- * host. It is only valid in the first TxD of a frame.
- * Bits 40 to 42 - Reserved.
- * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
- * functions. This field is valid only in the first TxD
- * of a frame.
- * Bits 44 to 45 - Reserved.
- * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
- * generate an interrupt as soon as all of the frames in the list
- * have been transmitted. In order to have per-frame interrupts,
- * the driver should place a maximum of one frame per list. This
- * field is only valid in the first TxD of a frame.
- * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
- * to count the frame toward the utilization interrupt specified in
- * the Tx_Int_Number field. This field is only valid in the first
- * TxD of a frame.
- * Bits 48 to 63 - Reserved.
- * @buffer_pointer: Buffer start address.
- * @host_control: Host_Control. Opaque 64-bit data stored by driver inside the
- * Titan descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
- * to the driver with each completed descriptor.
- *
- * Transmit descriptor (TxD). A fifo descriptor contains a configured number
- * (list) of TxDs. For more details please refer to the Titan User Guide,
- * Section 5.4.2 "Transmit Descriptor (TxD) Format".
- */
-struct vxge_hw_fifo_txd {
- u64 control_0;
-#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
-
-
-#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
-
-
-#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
-
-#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
-
-#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
-
- u64 control_1;
-#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
-#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
-#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
-#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
-
-#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
-
-#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
-#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
-
- u64 buffer_pointer;
-
- u64 host_control;
-};
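-
-/*
- * Illustrative sketch (the example_* name is not part of this API):
- * filling a single-buffer-frame TxD with the bit macros above. Per the
- * gather-code encoding in the @control_0 layout, FIRST|LAST yields 11b,
- * i.e. the whole frame resides in this one descriptor; ownership is
- * handed to the adapter before the list pointer is written.
- */
-static inline void example_txd_fill(struct vxge_hw_fifo_txd *txdp,
-                                    dma_addr_t dma, u32 len)
-{
-        txdp->buffer_pointer = dma;
-        txdp->control_1 = 0;
-        txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
-                                VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
-                                VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
-                          VXGE_HW_FIFO_TXD_BUFFER_SIZE(len) |
-                          VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
-}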
-
-/**
- * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
- * @host_control: This field is exclusively for host use and is "readonly"
- * from the adapter's perspective.
- * @control_0: Bits 0 to 6 - RTH_Bucket
- * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
- * by the host, and is set to 0 by the adapter.
- * 0 - Host owns RxD and buffer.
- * 1 - The adapter owns RxD and buffer.
- * Bit 8 - Fast_Path_Eligible When set, indicates that the
- * received frame meets all of the criteria for fast path processing.
- * The required criteria are as follows:
- * !SYN &
- * (Transfer_Code == "Transfer OK") &
- * (!Is_IP_Fragment) &
- * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
- * (Is_IPv6)) &
- * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
- * (Is_UDP & (computed_L4_checksum == 0xFFFF |
- * computed_L4_checksum == 0x0000)))
- * (same meaning for all RxD buffer modes)
- * Bit 9 - L3 Checksum Correct
- * Bit 10 - L4 Checksum Correct
- * Bit 11 - Reserved
- * Bit 12 to 15 - This field is written by the adapter. It is
- * used to report the status of the frame transfer to the host.
- * 0x0 - Transfer OK
- * 0x4 - RDA Failure During Transfer
- * 0x5 - Unparseable Packet, such as unknown IPv6 header.
- * 0x6 - Frame integrity error (FCS or ECC).
- * 0x7 - Buffer Size Error. The provided buffer(s) were not
- * appropriately sized and data loss occurred.
- * 0x8 - Internal ECC Error. RxD corrupted.
- * 0x9 - IPv4 Checksum error
- * 0xA - TCP/UDP Checksum error
- * 0xF - Unknown Error or Multiple Error. Indicates an
- * unknown problem or that more than one of transfer codes is set.
- * Bit 16 - SYN The adapter sets this field to indicate that
- * the incoming frame contained a TCP segment with its SYN bit
- * set and its ACK bit NOT set. (same meaning for all RxD buffer
- * modes)
- * Bit 17 - Is ICMP
- * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
- * Socket Pair Direct Match Table and the frame was steered based
- * on SPDM.
- * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
- * Indirection Table and the frame was steered based on hash
- * indirection.
- * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
- * type) that was used to calculate the hash.
- * Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
- * tagged.
- * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
- * of the received frame.
- * 0x0 - Ethernet DIX
- * 0x1 - LLC
- * 0x2 - SNAP (includes Jumbo-SNAP)
- * 0x3 - IPX
- * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
- * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
- * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
- * IP packet.
- * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
- * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
- * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
- * arrived with the frame. If the resulting computed IPv4 header
- * checksum for the frame did not produce the expected 0xFFFF value,
- * then the transfer code would be set to 0x9.
- * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
- * arrived with the frame. If the resulting computed TCP/UDP checksum
- * for the frame did not produce the expected 0xFFFF value, then the
- * transfer code would be set to 0xA.
- * @control_1: Bits 0 to 1 - Reserved
- * Bits 2 to 15 - Buffer0_Size. This field is set by the host and
- * eventually overwritten by the adapter. The host writes the
- * available buffer size in bytes when it passes the descriptor to
- * the adapter. When a frame is delivered to the host, the adapter
- * populates this field with the number of bytes written into the
- * buffer. The largest supported buffer is 16,383 bytes.
- * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
- * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
- * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
- * of the VLAN tag, if one was detected by the adapter. This field is
- * populated even if VLAN-tag stripping is enabled.
- * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
- *
- * One buffer mode RxD for ring structure
- */
-struct vxge_hw_ring_rxd_1 {
- u64 host_control;
- u64 control_0;
-#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
-
-#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
-
-#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-
-#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
-
-#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
-
-#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
-
-#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
-
-#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
-
-#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
-
-#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
-
-#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
-
-#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
-
- u64 control_1;
-
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
-
-#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
-
-#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
-
- u64 buffer0_ptr;
-};
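-
-/*
- * Illustrative sketch (the example_* name is not part of this API):
- * decoding a completed 1-buffer RxD with the GET macros above; a transfer
- * code of 0x0 means "Transfer OK" per the @control_0 layout.
- */
-static inline int example_rxd_1_transfer_ok(struct vxge_hw_ring_rxd_1 *rxdp,
-                                            u32 *pkt_len)
-{
-        *pkt_len = (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
-
-        return VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0) == 0;
-}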
-
-enum vxge_hw_rth_algoritms {
- RTH_ALG_JENKINS = 0,
- RTH_ALG_MS_RSS = 1,
- RTH_ALG_CRC32C = 2
-};
-
-/**
- * struct vxge_hw_rth_hash_types - RTH hash types.
- * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
- * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
- * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
- * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
- * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
- * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
- *
- * Used to pass RTH hash types to vxge_hw_vpath_rts_rth_set().
- *
- * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
- */
-struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en:1,
- hash_type_ipv4_en:1,
- hash_type_tcpipv6_en:1,
- hash_type_ipv6_en:1,
- hash_type_tcpipv6ex_en:1,
- hash_type_ipv6ex_en:1;
-};
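-
-/*
- * Illustrative sketch (the example_* name is not part of this API):
- * enabling TCP/IPv4 and plain IPv4 hashing. The filled structure is then
- * handed to vxge_hw_vpath_rts_rth_set() (declared further below) together
- * with an algorithm and a bucket size.
- */
-static inline void example_rth_types_fill(struct vxge_hw_rth_hash_types *t)
-{
-        *t = (struct vxge_hw_rth_hash_types) {
-                .hash_type_tcpipv4_en = 1,
-                .hash_type_ipv4_en = 1,
-        };
-}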
-
-void vxge_hw_device_debug_set(
- struct __vxge_hw_device *devh,
- enum vxge_debug_level level,
- u32 mask);
-
-u32
-vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
-
-u32
-vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-
-/**
- * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
- * @buf_mode: Buffer mode (only 1-buffer mode is currently supported)
- *
- * This function returns the size of the RxD for the given buffer mode
- */
-static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
-{
- return sizeof(struct vxge_hw_ring_rxd_1);
-}
-
-/**
- * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
- * @buf_mode: Buffer mode (1 buffer mode only)
- *
- * This function returns the number of RxD for RxD block for given buffer mode
- */
-static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
-{
- return (u32)((VXGE_HW_BLOCK_SIZE-16) /
- sizeof(struct vxge_hw_ring_rxd_1));
-}
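-
-/*
- * For reference: assuming the usual 4096-byte VXGE_HW_BLOCK_SIZE and the
- * 32-byte 1-buffer RxD described above, this evaluates to
- * (4096 - 16) / 32 = 127 RxDs per block (integer division); the 16 bytes
- * are reserved at the end of each block.
- */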
-
-/**
- * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
- * @rxdh: Descriptor handle.
- * @dma_pointer: DMA address of a single receive buffer this descriptor
- * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
- * the receive buffer should be already mapped to the device
- * @size: Size of the receive @dma_pointer buffer.
- *
- * Prepare 1-buffer-mode Rx descriptor for posting
- * (via vxge_hw_ring_rxd_post()).
- *
- * This inline helper-function does not return any parameters and always
- * succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_set(
- void *rxdh,
- dma_addr_t dma_pointer,
- u32 size)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxdp->buffer0_ptr = dma_pointer;
- rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
- rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
-}
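-
-/*
- * Illustrative sketch (the example_* name is not part of this API):
- * pairing a DMA-mapped receive buffer with a descriptor, e.g. from an
- * rxd_init-style callback. The mapping uses the legacy PCI DMA API of
- * this era; error handling is omitted for brevity.
- */
-static inline void example_rxd_init(void *rxdh, struct pci_dev *pdev,
-                                    void *buf, u32 size)
-{
-        dma_addr_t dma = pci_map_single(pdev, buf, size, PCI_DMA_FROMDEVICE);
-
-        vxge_hw_ring_rxd_1b_set(rxdh, dma, size);
-}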
-
-/**
- * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
- * descriptor.
- * @ring_handle: Ring handle.
- * @rxdh: Descriptor handle.
- * @pkt_length: Returned length (in bytes) of the data in the completed
- * buffer.
- *
- * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
- * This inline helper-function uses the completed descriptor to populate
- * its "out" parameters. The function always succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u32 *pkt_length)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- *pkt_length =
- (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
-}
-
-/**
- * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
- * a completed receive descriptor for 1b mode.
- * @ring_handle: Ring handle.
- * @rxdh: Descriptor handle.
- * @rxd_info: Descriptor information
- *
- * Retrieve extended information associated with a completed receive descriptor.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_info_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- struct vxge_hw_ring_rxd_info *rxd_info)
-{
-
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxd_info->syn_flag =
- (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
- rxd_info->is_icmp =
- (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
- rxd_info->fast_path_eligible =
- (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
- rxd_info->l3_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l3_cksum =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
- rxd_info->l4_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l4_cksum =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
- rxd_info->frame =
- (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
- rxd_info->proto =
- (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
- rxd_info->is_vlan =
- (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
- rxd_info->vlan =
- (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
- rxd_info->rth_bucket =
- (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
- rxd_info->rth_it_hit =
- (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
- rxd_info->rth_spdm_hit =
- (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
- rxd_info->rth_hash_type =
- (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
- rxd_info->rth_value =
- (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
-}
-
-/**
- * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
- * of a 1b or 3b mode ring.
- * @rxdh: Descriptor handle.
- *
- * Returns: private driver info associated with the descriptor.
- * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
- *
- */
-static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- return (void *)(size_t)rxdp->host_control;
-}
-
-/**
- * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
- * @txdlh: Descriptor handle.
- * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
- * and/or TCP and/or UDP.
- *
- * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
- * descriptor.
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_buffer_set().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
- txdp->control_1 |= cksum_bits;
-}
-
-/**
- * vxge_hw_fifo_txdl_mss_set - Set MSS.
- * @txdlh: Descriptor handle.
- * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
- * driver, which in turn inserts the MSS into the @txdlh.
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_cksum_set_bits().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
-}
-
-/**
- * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
- * @txdlh: Descriptor handle.
- * @vlan_tag: 16bit VLAN tag.
- *
- * Insert VLAN tag into specified transmit descriptor.
- * The actual insertion of the tag into outgoing frame is done by the hardware.
- */
-static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
-}
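-
-/*
- * Illustrative sketch (the example_* name is not part of this API): the
- * usual order of the "preparation" helpers on a reserved TxDL before
- * posting; buffer attachment and posting are noted in comments since
- * their declarations live elsewhere in this API.
- */
-static inline void example_txdl_prepare(void *txdlh, int mss, u16 vlan_tag)
-{
-        vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
-                                         VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
-                                         VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
-        vxge_hw_fifo_txdl_mss_set(txdlh, mss);
-        vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
-        /* buffers are then attached via vxge_hw_fifo_txdl_buffer_set() and
-         * the list is posted with vxge_hw_fifo_txdl_post() */
-}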
-
-/**
- * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
- * @txdlh: Descriptor handle.
- *
- * Retrieve per-descriptor private data.
- * Note that driver requests per-descriptor space via
- * struct vxge_hw_fifo_attr passed to
- * vxge_hw_vpath_open().
- *
- * Returns: private driver data associated with the descriptor.
- */
-static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- return (void *)(size_t)txdp->host_control;
-}
-
-/**
- * struct vxge_hw_ring_attr - Ring open "template".
- * @callback: Ring completion callback. HW invokes the callback when there
- * are new completions on that ring. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Ring's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding ring.
- * See also vxge_hw_ring_rxd_term_f{}.
- * @userdata: User-defined "context" of _that_ ring. Passed back to the
- * user as one of the @callback, @rxd_init, and @rxd_term arguments.
- * @per_rxd_space: If specified (i.e., greater than zero): extra space
- * reserved by HW per each receive descriptor.
- * Can be used to store
- * and retrieve on completion, information specific
- * to the driver.
- *
- * Ring open "template". User fills the structure with ring
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_ring_attr {
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- void *userdata;
- u32 per_rxd_space;
-};
-
-/**
- * function vxge_hw_fifo_callback_f - FIFO callback.
- * @vpath_handle: Virtual path whose fifo contains one or more completed
- * descriptors.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @t_code: Transfer code, as per Titan User Guide.
- * Returned by HW.
- * @host_control: Opaque 64bit data stored by driver inside the Titan
- * descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post(). The @host_control is returned
- * as is to the driver with each completed descriptor.
- * @userdata: Opaque per-fifo data specified at fifo open
- * time, via vxge_hw_vpath_open().
- *
- * Fifo completion callback (type declaration). A single per-fifo
- * callback is specified at fifo open time, via
- * vxge_hw_vpath_open(). Typically gets called as part of the processing
- * of the Interrupt Service Routine.
- *
- * Fifo callback gets called by HW if, and only if, there is at least
- * one new completion on a given fifo. Upon processing the first @txdlh driver
- * is _supposed_ to continue consuming completions using:
- * - vxge_hw_fifo_txdl_next_completed()
- *
- * Note that failure to process new completions in a timely fashion
- * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
- *
- * Non-zero @t_code means failure to process transmit descriptor.
- *
- * In the "transmit" case the failure could happen, for instance, when the
- * link is down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
- */
-/**
- * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
- * @userdata: Per-fifo user data (a.k.a. context) specified at
- * fifo open time, via vxge_hw_vpath_open().
- *
- * Terminate descriptor callback. Unless NULL is specified in the
- * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
- * HW invokes the callback as part of closing fifo, prior to
- * de-allocating the ring and associated data structures
- * (including descriptors).
- * The driver should utilize the callback to (for instance) unmap
- * and free DMA data buffers associated with the posted (state =
- * VXGE_HW_TXDL_STATE_POSTED) descriptors,
- * as well as other relevant cleanup functions.
- *
- * See also: struct vxge_hw_fifo_attr{}
- */
-/**
- * struct vxge_hw_fifo_attr - Fifo open "template".
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @userdata: User-defined "context" of _that_ fifo. Passed back to the
- * user as one of the @callback, and @txdl_term arguments.
- * @per_txdl_space: If specified (i.e., greater than zero): extra space
- * reserved by HW per each transmit descriptor. Can be used to
- * store, and retrieve on completion, information specific
- * to the driver.
- *
- * Fifo open "template". User fills the structure with fifo
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_fifo_attr {
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb, int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- void *userdata;
- u32 per_txdl_space;
-};
-
-/**
- * struct vxge_hw_vpath_attr - Attributes of virtual path
- * @vp_id: Identifier of Virtual Path
- * @ring_attr: Attributes of ring for non-offload receive
- * @fifo_attr: Attributes of fifo for non-offload transmit
- *
- * Attributes of virtual path. This structure is passed as parameter
- * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
- */
-struct vxge_hw_vpath_attr {
- u32 vp_id;
- struct vxge_hw_ring_attr ring_attr;
- struct vxge_hw_fifo_attr fifo_attr;
-};
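-
-/*
- * Illustrative sketch (the example_* name is not part of this API):
- * filling the template before calling vxge_hw_vpath_open() (declared
- * below). The rx_cb callback and ctx pointer are the driver's own.
- */
-static inline void example_vpath_attr_fill(struct vxge_hw_vpath_attr *attr,
-                u32 vp_id,
-                enum vxge_hw_status (*rx_cb)(struct __vxge_hw_ring *, void *,
-                                             u8, void *),
-                void *ctx)
-{
-        *attr = (struct vxge_hw_vpath_attr) { .vp_id = vp_id };
-        attr->ring_attr.callback = rx_cb;
-        attr->ring_attr.userdata = ctx;
-        attr->fifo_attr.userdata = ctx;
-}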
-
-enum vxge_hw_status vxge_hw_device_hw_info_get(
- void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status vxge_hw_device_config_default_get(
- struct vxge_hw_device_config *device_config);
-
-/**
- * vxge_hw_device_link_state_get - Get link state.
- * @devh: HW device handle.
- *
- * Get link state.
- * Returns: link state.
- */
-static inline
-enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
- struct __vxge_hw_device *devh)
-{
- return devh->link_state;
-}
-
-void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config);
-
-enum vxge_hw_status vxge_hw_device_getpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 *tx,
- u32 *rx);
-
-enum vxge_hw_status vxge_hw_device_setpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 tx,
- u32 rx);
-
-static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
- unsigned long size,
- struct pci_dev **p_dmah,
- struct pci_dev **p_dma_acch)
-{
- void *vaddr;
- unsigned long misaligned = 0;
- int realloc_flag = 0;
- *p_dma_acch = *p_dmah = NULL;
-
-realloc:
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- if (vaddr == NULL)
- return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
- VXGE_CACHE_LINE_SIZE);
- if (realloc_flag)
- goto out;
-
- if (misaligned) {
- /* misaligned, free current one and try allocating
- * size + VXGE_CACHE_LINE_SIZE memory
- */
- kfree(vaddr);
- size += VXGE_CACHE_LINE_SIZE;
- realloc_flag = 1;
- goto realloc;
- }
-out:
- *(unsigned long *)p_dma_acch = misaligned;
- vaddr = (void *)((u8 *)vaddr + misaligned);
- return vaddr;
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
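-
-/*
- * Illustrative sketch (the example_* name is not part of this API): the
- * alloc/free pair above must share the same @p_dma_acch cookie; the
- * allocator stores the alignment offset in it, and the free path uses it
- * to recover the original kmalloc pointer.
- */
-static inline void example_dma_buf_roundtrip(struct pci_dev *pdev)
-{
-        struct pci_dev *dmah = NULL, *dma_acch = NULL;
-        void *buf = vxge_os_dma_malloc(pdev, 4096, &dmah, &dma_acch);
-
-        if (buf)
-                vxge_os_dma_free(pdev, buf, &dma_acch);
-}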
-
-/*
- * __vxge_hw_mempool_item_priv - Return a pointer to the per-item private space
- */
-static inline void*
-__vxge_hw_mempool_item_priv(
- struct vxge_hw_mempool *mempool,
- u32 memblock_idx,
- void *item,
- u32 *memblock_item_idx)
-{
- ptrdiff_t offset;
- void *memblock = mempool->memblocks_arr[memblock_idx];
-
-
- offset = (u32)((u8 *)item - (u8 *)memblock);
- vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
-
- (*memblock_item_idx) = (u32) offset / mempool->item_size;
- vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
-
- return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
- (*memblock_item_idx) * mempool->items_priv_size;
-}
-
-/*
- * __vxge_hw_fifo_txdl_priv - Return the per-TxDL HW-private data.
- * @fifo: Fifo
- * @txdp: Pointer to a TxD
- */
-static inline struct __vxge_hw_fifo_txdl_priv *
-__vxge_hw_fifo_txdl_priv(
- struct __vxge_hw_fifo *fifo,
- struct vxge_hw_fifo_txd *txdp)
-{
- return (struct __vxge_hw_fifo_txdl_priv *)
- (((char *)((ulong)txdp->host_control)) +
- fifo->per_txdl_space);
-}
-
-enum vxge_hw_status vxge_hw_vpath_open(
- struct __vxge_hw_device *devh,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_close(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
-
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_mtu_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 new_mtu);
-
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-
-static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
-{
- writel(val, addr + 4);
-}
-
-static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
-
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
-
-/**
- * vxge_debug_ll
- * @level: level of debug verbosity.
- * @mask: mask for the debug
- * @fmt: printf like format string
- *
- * Provides logging facilities. Can be customized on a per-module
- * basis and/or with debug levels. Input parameters, except
- * mask and level, are the same as posix printf. This macro
- * may be compiled out if the DEBUG macro was never defined.
- * See also: enum vxge_debug_level{}.
- */
-#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) do { \
- if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
- (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
- if ((mask & VXGE_DEBUG_MASK) == mask) \
- printk(fmt "\n", ##__VA_ARGS__); \
-} while (0)
-#else
-#define vxge_debug_ll(level, mask, fmt, ...)
-#endif
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size);
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size);
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
-
-#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
-#define VXGE_HW_MAX_POLLING_COUNT 100
-
-void
-vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build);
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
- int size);
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *eprom_image_data);
-
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
deleted file mode 100644
index 4d91026485ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ /dev/null
@@ -1,1154 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/ethtool.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/etherdevice.h>
-
-#include "vxge-ethtool.h"
-
-static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
- {"\n DRIVER STATISTICS"},
- {"vpaths_opened"},
- {"vpath_open_fail_cnt"},
- {"link_up_cnt"},
- {"link_down_cnt"},
- {"tx_frms"},
- {"tx_errors"},
- {"tx_bytes"},
- {"txd_not_free"},
- {"txd_out_of_desc"},
- {"rx_frms"},
- {"rx_errors"},
- {"rx_bytes"},
- {"rx_mcast"},
- {"pci_map_fail_cnt"},
- {"skb_alloc_fail_cnt"}
-};
-
-/**
- * vxge_ethtool_set_link_ksettings - Sets different link parameters.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool to set
- * link information.
- *
- * The function sets different link parameters provided by the user onto
- * the NIC.
- * Return value:
- * 0 on success.
- */
-static int
-vxge_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- /* We currently only support 10Gb/FULL */
- if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
- (cmd->base.speed != SPEED_10000) ||
- (cmd->base.duplex != DUPLEX_FULL))
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * vxge_ethtool_get_link_ksettings - Return link specific information.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool
- * to return link information.
- *
- * Returns link specific information like speed, duplex etc. to ethtool.
- * Return value :
- * return 0 on success.
- */
-static int vxge_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(dev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-/**
- * vxge_ethtool_gdrvinfo - Returns driver specific information.
- * @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool to
- * return driver information.
- *
- * Returns driver specific information like name, version, etc. to ethtool.
- */
-static void vxge_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
-}
-
-/**
- * vxge_ethtool_gregs - dumps the vpath register space of Titan into the buffer.
- * @dev: device pointer.
- * @regs: pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @space: The buffer into which all the registers are dumped.
- *
- * Dumps the vpath register space of Titan NIC into the user given
- * buffer area.
- */
-static void vxge_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
-{
- int index, offset;
- enum vxge_hw_status status;
- u64 reg;
- u64 *reg_space = (u64 *)space;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
- regs->version = vdev->pdev->subsystem_device;
- for (index = 0; index < vdev->no_of_vpath; index++) {
- for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
- offset += 8) {
- status = vxge_hw_mgmt_reg_read(hldev,
- vxge_hw_mgmt_reg_type_vpath,
- vdev->vpaths[index].device_id,
- offset, &reg);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Getting reg dump Failed",
- __func__, __LINE__);
- return;
- }
- *reg_space++ = reg;
- }
- }
-}
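-
-/*
- * The dump above is laid out as vdev->no_of_vpath consecutive
- * struct vxge_hw_vpath_reg images, read 64 bits at a time, matching the
- * length reported by vxge_ethtool_get_regs_len() below.
- */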
-
-/**
- * vxge_ethtool_idnic - To physically identify the nic on the system.
- * @dev : device pointer.
- * @state : requested LED state
- *
- * Used to physically identify the NIC on the system.
- * Returns 0 on success.
- */
-static int vxge_ethtool_idnic(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * vxge_ethtool_getpause_data - Pause frame generation and reception.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * Returns the Pause frame generation and reception capability of the NIC.
- * Return value:
- * void
- */
-static void vxge_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
-}
-
-/**
- * vxge_ethtool_setpause_data - set/reset pause frame generation.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- * Return value:
- * Returns 0 on success.
- */
-static int vxge_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
-
- vdev->config.tx_pause_enable = ep->tx_pause;
- vdev->config.rx_pause_enable = ep->rx_pause;
-
- return 0;
-}
-
-static void vxge_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *tmp_stats)
-{
- int j, k;
- enum vxge_hw_status status;
- enum vxge_hw_status swstatus;
- struct vxge_vpath *vpath = NULL;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
- struct vxge_hw_xmac_stats *xmac_stats;
- struct vxge_hw_device_stats_sw_info *sw_stats;
- struct vxge_hw_device_stats_hw_info *hw_stats;
-
- u64 *ptr = tmp_stats;
-
- memset(tmp_stats, 0,
- vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
-
- xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
- if (xmac_stats == NULL) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for xmac_stats",
- __func__, __LINE__);
- return;
- }
-
- sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
- GFP_KERNEL);
- if (sw_stats == NULL) {
- kfree(xmac_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for sw_stats",
- __func__, __LINE__);
- return;
- }
-
- hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
- GFP_KERNEL);
- if (hw_stats == NULL) {
- kfree(xmac_stats);
- kfree(sw_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for hw_stats",
- __func__, __LINE__);
- return;
- }
-
- *ptr++ = 0;
- status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
- if (status != VXGE_HW_OK) {
- if (status != VXGE_HW_ERR_PRIVILEGED_OPERATION) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting xmac stats",
- __func__, __LINE__);
- }
- }
- swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
- if (swstatus != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting sw stats",
- __func__, __LINE__);
- }
-
- status = vxge_hw_device_stats_get(hldev, hw_stats);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d hw_stats_get error", __func__, __LINE__);
- }
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
- ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN);
- continue;
- }
-
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
- *ptr++ = vpath_info->tx_stats.tx_data_octets;
- *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
- *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
- *ptr++ = vpath_info->tx_stats.tx_icmp;
- *ptr++ = vpath_info->tx_stats.tx_tcp;
- *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
- *ptr++ = vpath_info->tx_stats.tx_udp;
- *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip;
- *ptr++ = vpath_info->tx_stats.tx_parse_error;
- *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_frms;
- *ptr++ = vpath_info->rx_stats.rx_offload_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
- *ptr++ = vpath_info->rx_stats.rx_data_octets;
- *ptr++ = vpath_info->rx_stats.rx_offload_octets;
- *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
- *ptr++ = vpath_info->rx_stats.rx_long_frms;
- *ptr++ = vpath_info->rx_stats.rx_usized_frms;
- *ptr++ = vpath_info->rx_stats.rx_osized_frms;
- *ptr++ = vpath_info->rx_stats.rx_frag_frms;
- *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ip;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
- *ptr++ = vpath_info->rx_stats.rx_ip_octets;
- *ptr++ = vpath_info->rx_stats.rx_err_ip;
- *ptr++ = vpath_info->rx_stats.rx_icmp;
- *ptr++ = vpath_info->rx_stats.rx_tcp;
- *ptr++ = vpath_info->rx_stats.rx_udp;
- *ptr++ = vpath_info->rx_stats.rx_err_tcp;
- *ptr++ = vpath_info->rx_stats.rx_lost_frms;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_various_discard;
- *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
- *ptr++ = vpath_info->rx_stats.rx_red_discard;
- *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
- *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_icmp;
- *ptr++ = xmac_stats->port_stats[k].tx_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_udp;
- *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
- *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
- *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_icmp;
- *ptr++ = xmac_stats->port_stats[k].rx_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_jettison;
- *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_sw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
- &sw_stats->vpath_info[j];
- *ptr++ = vpath_info->soft_reset_cnt;
- *ptr++ = vpath_info->error_stats.unknown_alarms;
- *ptr++ = vpath_info->error_stats.network_sustained_fault;
- *ptr++ = vpath_info->error_stats.network_sustained_ok;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
- *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
- *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
- *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
- *ptr++ = vpath_info->error_stats.target_illegal_access;
- *ptr++ = vpath_info->error_stats.ini_serr_det;
- *ptr++ = vpath_info->error_stats.prc_ring_bumps;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
- *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
- *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
- *ptr++ = vpath_info->ring_stats.common_stats.
- reserve_free_swaps_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
- *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
- *ptr++ = vpath_info->fifo_stats.common_stats.
- reserve_free_swaps_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
- *ptr++ = vpath_info->fifo_stats.total_posts;
- *ptr++ = vpath_info->fifo_stats.total_buffers;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
- ptr += VXGE_HW_VPATH_STATS_LEN;
- continue;
- }
- *ptr++ = vpath_info->ini_num_mwr_sent;
- *ptr++ = vpath_info->ini_num_mrd_sent;
- *ptr++ = vpath_info->ini_num_cpl_rcvd;
- *ptr++ = vpath_info->ini_num_mwr_byte_sent;
- *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
- *ptr++ = vpath_info->wrcrdtarb_xoff;
- *ptr++ = vpath_info->rdcrdtarb_xoff;
- *ptr++ = vpath_info->vpath_genstats_count0;
- *ptr++ = vpath_info->vpath_genstats_count1;
- *ptr++ = vpath_info->vpath_genstats_count2;
- *ptr++ = vpath_info->vpath_genstats_count3;
- *ptr++ = vpath_info->vpath_genstats_count4;
- *ptr++ = vpath_info->vpath_genstats_count5;
- *ptr++ = vpath_info->prog_event_vnum0;
- *ptr++ = vpath_info->prog_event_vnum1;
- *ptr++ = vpath_info->prog_event_vnum2;
- *ptr++ = vpath_info->prog_event_vnum3;
- *ptr++ = vpath_info->rx_multi_cast_frame_discard;
- *ptr++ = vpath_info->rx_frm_transferred;
- *ptr++ = vpath_info->rxd_returned;
- *ptr++ = vpath_info->rx_mpa_len_fail_frms;
- *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
- *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
- *ptr++ = vpath_info->rx_permitted_frms;
- *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
- *ptr++ = vpath_info->rx_wol_frms;
- *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
- }
-
- *ptr++ = 0;
- *ptr++ = vdev->stats.vpaths_open;
- *ptr++ = vdev->stats.vpath_open_fail;
- *ptr++ = vdev->stats.link_up;
- *ptr++ = vdev->stats.link_down;
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
- *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
- *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
- *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
- *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
- *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
- *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
- *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
- *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
- *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
- vdev->vpaths[k].ring.stats.pci_map_fail;
- *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
- }
-
- ptr += 12;
-
- kfree(xmac_stats);
- kfree(sw_stats);
- kfree(hw_stats);
-}
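-
-/*
- * Layout note: each "*ptr++ = 0" above emits a placeholder u64 for one
- * of the section-title strings ("VPATH STATISTICS", "AGGR STATISTICS",
- * ...) produced by vxge_ethtool_get_strings(), keeping the values and
- * their names in lockstep.
- */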
-
-static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
- u8 *data)
-{
- int stat_size = 0;
- int i, j;
- struct vxgedev *vdev = netdev_priv(dev);
- switch (stringset) {
- case ETH_SS_STATS:
- vxge_add_string("VPATH STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_proto_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_offload_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_retx_tcp_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_various_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_sleep_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_queue_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_protocol_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_any_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octets_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_count_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discard_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_udp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_switch_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_len_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rpa_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rts_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_trash_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_buff_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_local_fault_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jettison_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_remote_fault_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("soft_reset_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("unknown_alarms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_fault_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_ok_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_pif_chain_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_drop_timeout_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("target_illegal_access_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_serr_det_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_ring_bumps_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_quanta_size_err_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("ring_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- vxge_add_string("fifo_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_posts_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_buffers_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- }
-
- vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count0_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count1_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count2_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count3_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count4_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count5_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum0_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum1_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum2_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum3_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_multi_cast_frame_discard_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_frm_transferred_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rxd_returned_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_permitted_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_wol_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- }
-
- memcpy(data + stat_size, &ethtool_driver_stats_keys,
- sizeof(ethtool_driver_stats_keys));
- }
-}
-
-static int vxge_ethtool_get_regs_len(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
-}
-
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (sset) {
- case ETH_SS_STATS:
- return VXGE_TITLE_LEN +
- (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
- DRIVER_STAT_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
- printk(KERN_INFO "Single Function Mode is required to flash the"
- " firmware\n");
- return -EINVAL;
- }
-
- if (netif_running(dev)) {
- printk(KERN_INFO "Interface %s must be down to flash the "
- "firmware\n", dev->name);
- return -EBUSY;
- }
-
- return vxge_fw_upgrade(vdev, parms->data, 1);
-}
-
-static const struct ethtool_ops vxge_ethtool_ops = {
- .get_drvinfo = vxge_ethtool_gdrvinfo,
- .get_regs_len = vxge_ethtool_get_regs_len,
- .get_regs = vxge_ethtool_gregs,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = vxge_ethtool_getpause_data,
- .set_pauseparam = vxge_ethtool_setpause_data,
- .get_strings = vxge_ethtool_get_strings,
- .set_phys_id = vxge_ethtool_idnic,
- .get_sset_count = vxge_ethtool_get_sset_count,
- .get_ethtool_stats = vxge_get_ethtool_stats,
- .flash_device = vxge_fw_flash,
- .get_link_ksettings = vxge_ethtool_get_link_ksettings,
- .set_link_ksettings = vxge_ethtool_set_link_ksettings,
-};
-
-void vxge_initialize_ethtool_ops(struct net_device *ndev)
-{
- ndev->ethtool_ops = &vxge_ethtool_ops;
-}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
deleted file mode 100644
index 065a2c0429a4..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef _VXGE_ETHTOOL_H
-#define _VXGE_ETHTOOL_H
-
-#include "vxge-main.h"
-
-/* Ethtool related variables and Macros. */
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
-
-#define VXGE_TITLE_LEN 5
-#define VXGE_HW_VPATH_STATS_LEN 27
-#define VXGE_HW_AGGR_STATS_LEN 13
-#define VXGE_HW_PORT_STATS_LEN 94
-#define VXGE_HW_VPATH_TX_STATS_LEN 19
-#define VXGE_HW_VPATH_RX_STATS_LEN 42
-#define VXGE_SW_STATS_LEN 60
-#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
- VXGE_HW_AGGR_STATS_LEN +\
- VXGE_HW_PORT_STATS_LEN +\
- VXGE_HW_VPATH_TX_STATS_LEN +\
- VXGE_HW_VPATH_RX_STATS_LEN)
-
-#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
-#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
-
-/* Maximum flicker time of adapter LED */
-#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
-#define VXGE_FLICKER_ON 1
-#define VXGE_FLICKER_OFF 0
-
-#define vxge_add_string(fmt, size, buf, ...) {\
- snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
- *size += ETH_GSTRING_LEN; \
-}
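-
-/*
- * Usage sketch (illustrative, not from the original sources): each call
- * formats into its own fixed ETH_GSTRING_LEN slot and advances *size by
- * one slot, keeping the name table in lockstep with the u64 values that
- * vxge_get_ethtool_stats() emits.
- *
- *	int size = 0;
- *	vxge_add_string("tx_frms_%d\t\t\t\t", &size, data, 0);
- *	// size is now ETH_GSTRING_LEN; data[0..] holds "tx_frms_0"
- */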
-
-#endif /*_VXGE_ETHTOOL_H*/
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
deleted file mode 100644
index fa5d4ddf429b..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ /dev/null
@@ -1,4808 +0,0 @@
-/******************************************************************************
-* This software may be used and distributed according to the terms of
-* the GNU General Public License (GPL), incorporated herein by reference.
-* Drivers based on or derived from this code fall under the GPL and must
-* retain the authorship, copyright and license notice. This file is not
-* a complete program and may only be used when the entire operating
-* system is licensed under the GPL.
-* See the file COPYING in this distribution for more information.
-*
-* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
-* Virtualized Server Adapter.
-* Copyright(c) 2002-2010 Exar Corp.
-*
-* The module loadable parameters that are supported by the driver and a brief
-* explanation of all the variables:
-* vlan_tag_strip:
-* Strip VLAN Tag enable/disable. Instructs the device to remove
-* the VLAN tag from all received tagged frames that are not
-* replicated at the internal L2 switch.
-* 0 - Do not strip the VLAN tag.
-* 1 - Strip the VLAN tag.
-*
-* addr_learn_en:
-* Enable learning the mac address of the guest OS interface in
-* a virtualization environment.
-* 0 - DISABLE
-* 1 - ENABLE
-*
-* max_config_port:
-* Maximum number of ports to be supported.
-* MIN - 1 and MAX - 2
-*
-* max_config_vpath:
-* This configures the maximum number of VPATHs for each
-* device function.
-* MIN - 1 and MAX - 17
-*
-* max_config_dev:
-* This configures the maximum number of device functions to be enabled.
-* MIN - 1 and MAX - 17
-*
-******************************************************************************/
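-
-/*
- * Hypothetical load-time example (values are illustrative only):
- *
- *	modprobe vxge vlan_tag_strip=1 max_config_port=2 max_config_vpath=4
- */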
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <net/ip.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include <linux/net_tstamp.h>
-#include <linux/prefetch.h>
-#include <linux/module.h>
-#include "vxge-main.h"
-#include "vxge-reg.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
- "Virtualized Server Adapter");
-
-static const struct pci_device_id vxge_id_table[] = {
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
- PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
- PCI_ANY_ID},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, vxge_id_table);
-
-VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
-VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
-VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
-VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
-
-static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
-static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
-module_param_array(bw_percentage, uint, NULL, 0);
-
-static struct vxge_drv_config *driver_config;
-static void vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-static inline int is_vxge_card_up(struct vxgedev *vdev)
-{
- return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-}
-
-static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
-{
- struct sk_buff **skb_ptr = NULL;
- struct sk_buff **temp;
-#define NR_SKB_COMPLETED 16
- struct sk_buff *completed[NR_SKB_COMPLETED];
- int more;
-
- do {
- more = 0;
- skb_ptr = completed;
-
- if (__netif_tx_trylock(fifo->txq)) {
- vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
- NR_SKB_COMPLETED, &more);
- __netif_tx_unlock(fifo->txq);
- }
-
- /* free SKBs */
- for (temp = completed; temp != skb_ptr; temp++)
- dev_consume_skb_irq(*temp);
- } while (more);
-}
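-
-/*
- * The helper above harvests completed TxDs in batches of
- * NR_SKB_COMPLETED while holding the tx queue lock, frees the harvested
- * skbs after dropping it, and loops for as long as the hardware reports
- * more completions.
- */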
-
-static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
-{
- int i;
-
- /* Complete all transmits */
- for (i = 0; i < vdev->no_of_vpath; i++)
- VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
-}
-
-static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
-{
- int i;
- struct vxge_ring *ring;
-
- /* Complete all receives*/
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- vxge_hw_vpath_poll_rx(ring->handle);
- }
-}
-
-/*
- * vxge_callback_link_up
- *
- * This function is called during interrupt context to notify link up state
- * change.
- */
-static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
-
- netif_carrier_on(vdev->ndev);
- netif_tx_wake_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_callback_link_down
- *
- * This function is called during interrupt context to notify link down state
- * change.
- */
-static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Down\n");
-
- vdev->stats.link_down++;
- netif_carrier_off(vdev->ndev);
- netif_tx_stop_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_rx_alloc
- *
- * Allocate SKB.
- */
-static struct sk_buff *
-vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
-{
- struct net_device *dev;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
-
- dev = ring->ndev;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- /* try to allocate skb first. this one may fail */
- skb = netdev_alloc_skb(dev, skb_size +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb == NULL) {
- vxge_debug_mem(VXGE_ERR,
- "%s: out of memory to allocate SKB", dev->name);
- ring->stats.skb_alloc_fail++;
- return NULL;
- }
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d Skb : 0x%p", ring->ndev->name,
- __func__, __LINE__, skb);
-
- skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- rx_priv->skb = skb;
- rx_priv->skb_data = NULL;
- rx_priv->data_size = skb_size;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return skb;
-}
-
-/*
- * vxge_rx_map
- */
-static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
-{
- struct vxge_rx_priv *rx_priv;
- dma_addr_t dma_addr;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- rx_priv->skb_data = rx_priv->skb->data;
- dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
- ring->stats.pci_map_fail++;
- return -EIO;
- }
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
- ring->ndev->name, __func__, __LINE__,
- (unsigned long long)dma_addr);
- vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
-
- rx_priv->data_dma = dma_addr;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return 0;
-}
-
-/*
- * vxge_rx_initial_replenish
- * Allocation of RxD as an initial replenish procedure.
- */
-static enum vxge_hw_status
-vxge_rx_initial_replenish(void *dtrh, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (vxge_rx_alloc(dtrh, ring,
- VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
- return VXGE_HW_FAIL;
-
- if (vxge_rx_map(dtrh, ring)) {
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
- dev_kfree_skb(rx_priv->skb);
-
- return VXGE_HW_FAIL;
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return VXGE_HW_OK;
-}
-
-static inline void
-vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
- int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
-{
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- skb_record_rx_queue(skb, ring->driver_id);
- skb->protocol = eth_type_trans(skb, ring->ndev);
-
- u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.rx_frms++;
- ring->stats.rx_bytes += pkt_length;
-
- if (skb->pkt_type == PACKET_MULTICAST)
- ring->stats.rx_mcast++;
- u64_stats_update_end(&ring->stats.syncp);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb protocol = %d",
- ring->ndev->name, __func__, __LINE__, skb->protocol);
-
- if (ext_info->vlan &&
- ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
- napi_gro_receive(ring->napi_p, skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-}
-
-static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
- struct vxge_rx_priv *rx_priv)
-{
- dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
- vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
-}
-
-static inline void vxge_post(int *dtr_cnt, void **first_dtr,
- void *post_dtr, struct __vxge_hw_ring *ringh)
-{
- int dtr_count = *dtr_cnt;
- if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
- if (*first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
- *first_dtr = post_dtr;
- } else
- vxge_hw_ring_rxd_post_post(ringh, post_dtr);
- dtr_count++;
- *dtr_cnt = dtr_count;
-}
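-
-/*
- * vxge_post() batches RxD doorbells: every VXGE_HW_RXSYNC_FREQ_CNT-th
- * descriptor flushes the previous batch via the write-memory-barrier
- * post and becomes the head of the next batch; the rest are queued with
- * the cheaper plain post.
- */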
-
-/*
- * vxge_rx_1b_compl
- *
- * Called when the interrupt is due to a received frame, or when the
- * receive ring still contains fresh, as-yet unprocessed frames.
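- *
- * Frames longer than VXGE_LL_RX_COPY_THRESHOLD are passed up in place
- * and their ring slot is refilled with a freshly allocated buffer;
- * shorter frames are copied into a new small skb so the original ring
- * buffer can be recycled immediately.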
- */
-static enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct net_device *dev = ring->ndev;
- unsigned int dma_sizes;
- void *first_dtr = NULL;
- int dtr_cnt = 0;
- int data_size;
- dma_addr_t data_dma;
- int pkt_length;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
- struct vxge_hw_ring_rxd_info ext_info;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- if (ring->budget <= 0)
- goto out;
-
- do {
- prefetch((char *)dtr + L1_CACHE_BYTES);
- rx_priv = vxge_hw_ring_rxd_private_get(dtr);
- skb = rx_priv->skb;
- data_size = rx_priv->data_size;
- data_dma = rx_priv->data_dma;
- prefetch(rx_priv->skb_data);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb = 0x%p",
- ring->ndev->name, __func__, __LINE__, skb);
-
- vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
- pkt_length = dma_sizes;
-
- pkt_length -= ETH_FCS_LEN;
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d Packet Length = %d",
- ring->ndev->name, __func__, __LINE__, pkt_length);
-
- vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
-
- /* check skb validity */
- vxge_assert(skb);
-
- prefetch((char *)skb + L1_CACHE_BYTES);
- if (unlikely(t_code)) {
- if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
- VXGE_HW_OK) {
-
- ring->stats.rx_errors++;
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s :%d Rx T_code is %d",
- ring->ndev->name, __func__,
- __LINE__, t_code);
-
-				/* If the t_code is not supported and is
-				 * other than 0x5 (an unparseable packet,
-				 * such as an unknown IPv6 header), drop it.
-				 */
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- continue;
- }
- }
-
- if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
- if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
- if (!vxge_rx_map(dtr, ring)) {
- skb_put(skb, pkt_length);
-
- dma_unmap_single(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_pre_post(ringh, dtr);
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- } else {
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb = skb;
- rx_priv->data_size = data_size;
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- struct sk_buff *skb_up;
-
- skb_up = netdev_alloc_skb(dev, pkt_length +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb_up != NULL) {
- skb_reserve(skb_up,
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- dma_sync_single_for_cpu(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d skb_up = %p",
- ring->ndev->name, __func__,
- __LINE__, skb);
- memcpy(skb_up->data, skb->data, pkt_length);
-
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
-				/* deliver the copied small SKB instead */
- skb = skb_up;
- skb_put(skb, pkt_length);
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- vxge_debug_rx(VXGE_ERR,
- "%s: vxge_rx_1b_compl: out of "
- "memory", dev->name);
- ring->stats.skb_alloc_fail++;
- break;
- }
- }
-
- if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
- !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
- (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
- ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
- ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
-
-
- if (ring->rx_hwts) {
- struct skb_shared_hwtstamps *skb_hwts;
- u32 ns = *(u32 *)(skb->head + pkt_length);
-
- skb_hwts = skb_hwtstamps(skb);
- skb_hwts->hwtstamp = ns_to_ktime(ns);
- }
-
- /* rth_hash_type and rth_it_hit are non-zero regardless of
- * whether rss is enabled. Only the rth_value is zero/non-zero
- * if rss is disabled/enabled, so key off of that.
- */
- if (ext_info.rth_value)
- skb_set_hash(skb, ext_info.rth_value,
- PKT_HASH_TYPE_L3);
-
- vxge_rx_complete(ring, skb, ext_info.vlan,
- pkt_length, &ext_info);
-
- ring->budget--;
- ring->pkts_processed++;
- if (!ring->budget)
- break;
-
- } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
- &t_code) == VXGE_HW_OK);
-
- if (first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...",
- __func__, __LINE__);
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_xmit_compl
- *
- * If an interrupt was raised to indicate DMA complete of the Tx packet,
- * this function is called. It identifies the last TxD whose buffer was
- * freed and frees all skbs whose data have already been DMA'ed into the
- * NIC's internal memory.
- */
-static enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skb, int *more)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- struct sk_buff *skb, **done_skb = *skb_ptr;
- int pkt_cnt = 0;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Entered....", __func__, __LINE__);
-
- do {
- int frg_cnt;
- skb_frag_t *frag;
- int i = 0, j;
- struct vxge_tx_priv *txd_priv =
- vxge_hw_fifo_txdl_private_get(dtr);
-
- skb = txd_priv->skb;
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p "
- "tcode = 0x%x", fifo->ndev->name, __func__,
- __LINE__, fifo_hw, dtr, t_code);
- /* check skb validity */
- vxge_assert(skb);
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
- fifo->ndev->name, __func__, __LINE__,
- skb, txd_priv, frg_cnt);
- if (unlikely(t_code)) {
- fifo->stats.tx_errors++;
- vxge_debug_tx(VXGE_ERR,
- "%s: tx: dtr %p completed due to "
- "error t_code %01x", fifo->ndev->name,
- dtr, t_code);
- vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
- }
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev,
- txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-
- /* Updating the statistics block */
- u64_stats_update_begin(&fifo->stats.syncp);
- fifo->stats.tx_frms++;
- fifo->stats.tx_bytes += skb->len;
- u64_stats_update_end(&fifo->stats.syncp);
-
- *done_skb++ = skb;
-
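- /* The caller provides room for at most nr_skb completed skbs;
- * once that fills up, set *more so it knows to call back in
- * for the remainder.
- */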
- if (--nr_skb <= 0) {
- *more = 1;
- break;
- }
-
- pkt_cnt++;
- if (pkt_cnt > fifo->indicate_max_pkts)
- break;
-
- } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
- &dtr, &t_code) == VXGE_HW_OK);
-
- *skb_ptr = done_skb;
- if (netif_tx_queue_stopped(fifo->txq))
- netif_tx_wake_queue(fifo->txq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- fifo->ndev->name, __func__, __LINE__);
- return VXGE_HW_OK;
-}
-
-/* select a vpath to transmit the packet */
-static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
-{
- u16 queue_len, counter = 0;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *ip;
- struct tcphdr *th;
-
- ip = ip_hdr(skb);
-
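- /* Steer non-fragmented TCP/IP frames by the sum of the TCP
- * source and destination ports, masked down to the configured
- * vpath range; anything else stays on vpath 0.
- */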
- if (!ip_is_fragment(ip)) {
- th = (struct tcphdr *)(((unsigned char *)ip) +
- ip->ihl*4);
-
- queue_len = vdev->no_of_vpath;
- counter = (ntohs(th->source) +
- ntohs(th->dest)) &
- vdev->vpath_selector[queue_len - 1];
- if (counter >= queue_len)
- counter = queue_len - 1;
- }
- }
- return counter;
-}
-
-static enum vxge_hw_status vxge_search_mac_addr_in_list(
- struct vxge_vpath *vpath, u64 del_mac)
-{
- struct list_head *entry, *next;
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
- return TRUE;
- }
- return FALSE;
-}
-
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct vxge_mac_addrs *new_mac_entry;
- u8 *mac_address = NULL;
-
- if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
- return TRUE;
-
- new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
- if (!new_mac_entry) {
- vxge_debug_mem(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- return FALSE;
- }
-
- list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
- /* Copy the new mac address to the list */
- mac_address = (u8 *)&new_mac_entry->macaddr;
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- new_mac_entry->state = mac->state;
- vpath->mac_addr_cnt++;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt++;
-
- return TRUE;
-}
-
-/* Add a mac address to DA table */
-static enum vxge_hw_status
-vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
- if (is_multicast_ether_addr(mac->macaddr))
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
- else
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
- mac->macmask, duplicate_mode);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config add entry failed for vpath:%d",
- vpath->device_id);
- } else
- if (FALSE == vxge_mac_list_add(vpath, mac))
- status = -EPERM;
-
- return status;
-}
-
-static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
-{
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- u64 mac_addr = 0, vpath_vector = 0;
- int vpath_idx = 0;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = NULL;
-
- mac_address = (u8 *)&mac_addr;
- memcpy(mac_address, mac_header, ETH_ALEN);
-
- /* Is this mac address already in the list? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vxge_search_mac_addr_in_list(vpath, mac_addr))
- return vpath_idx;
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
-
- /* Does any vpath have room to add a mac address to its DA table? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK)
- return -EPERM;
- return vpath_idx;
- }
- }
-
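- /* All DA tables are full. Keep the address only in the driver's
- * list and fall back to catch-basin mode, where the first vpath
- * receives frames for otherwise unmatched addresses.
- */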
- mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
- vpath_idx = 0;
- mac_info.vpath_no = vpath_idx;
- /* Is the first vpath already selected as catch-basin ? */
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
- return vpath_idx;
- }
-
- /* Select first vpath as catch-basin */
- vpath_vector = vxge_mBIT(vpath->device_id);
- status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- vpath_vector);
- if (status != VXGE_HW_OK) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Unable to set the vpath-%d in catch-basin mode",
- VXGE_DRIVER_NAME, vpath->device_id);
- return -EPERM;
- }
-
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
-
- return vpath_idx;
-}
-
-/**
- * vxge_xmit
- * @skb : the socket buffer containing the Tx data.
- * @dev : device pointer.
- *
- * This function is the Tx entry point of the driver. The Neterion NIC
- * supports certain protocol-assist features on the Tx side, namely CSO,
- * S/G and LSO.
-*/
-static netdev_tx_t
-vxge_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct vxge_fifo *fifo = NULL;
- void *dtr_priv;
- void *dtr = NULL;
- struct vxgedev *vdev = NULL;
- enum vxge_hw_status status;
- int frg_cnt, first_frg_len;
- skb_frag_t *frag;
- int i = 0, j = 0, avail;
- u64 dma_pointer;
- struct vxge_tx_priv *txdl_priv = NULL;
- struct __vxge_hw_fifo *fifo_hw;
- int offload_type;
- int vpath_no = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- /* A buffer with no data will be dropped */
- if (unlikely(skb->len <= 0)) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Buffer has no data..", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- vxge_debug_tx(VXGE_ERR,
- "%s: vdev not initialized", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (vdev->config.addr_learn_en) {
- vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
- if (vpath_no == -EPERM) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Failed to store the mac address",
- dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-
- if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
- vpath_no = skb_get_queue_mapping(skb);
- else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
- vpath_no = vxge_get_vpath_no(vdev, skb);
-
- vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
-
- if (vpath_no >= vdev->no_of_vpath)
- vpath_no = 0;
-
- fifo = &vdev->vpaths[vpath_no].fifo;
- fifo_hw = fifo->handle;
-
- if (netif_tx_queue_stopped(fifo->txq))
- return NETDEV_TX_BUSY;
-
- avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
- if (avail == 0) {
- vxge_debug_tx(VXGE_ERR,
- "%s: No free TXDs available", dev->name);
- fifo->stats.txd_not_free++;
- goto _exit0;
- }
-
- /* Last TXD? Stop tx queue to avoid dropping packets. TX
- * completion will resume the queue.
- */
- if (avail == 1)
- netif_tx_stop_queue(fifo->txq);
-
- status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
- if (unlikely(status != VXGE_HW_OK)) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Out of descriptors .", dev->name);
- fifo->stats.txd_out_of_desc++;
- goto _exit0;
- }
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
- dev->name, __func__, __LINE__,
- fifo_hw, dtr, dtr_priv);
-
- if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag = skb_vlan_tag_get(skb);
- vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
- }
-
- first_frg_len = skb_headlen(skb);
-
- dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
- first_frg_len, DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
- fifo->stats.pci_map_fail++;
- goto _exit0;
- }
-
- txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
- txdl_priv->skb = skb;
- txdl_priv->dma_buffers[j] = dma_pointer;
-
- frg_cnt = skb_shinfo(skb)->nr_frags;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d skb = %p txdl_priv = %p "
- "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
- __func__, __LINE__, skb, txdl_priv,
- frg_cnt, (unsigned long long)dma_pointer);
-
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- first_frg_len);
-
- frag = &skb_shinfo(skb)->frags[0];
- for (i = 0; i < frg_cnt; i++) {
- /* skip a zero-length fragment, keeping frag in step with i */
- if (!skb_frag_size(frag)) {
- frag += 1;
- continue;
- }
-
- dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
- goto _exit2;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d frag = %d dma_pointer = 0x%llx",
- dev->name, __func__, __LINE__, i,
- (unsigned long long)dma_pointer);
-
- txdl_priv->dma_buffers[j] = dma_pointer;
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- skb_frag_size(frag));
- frag += 1;
- }
-
- offload_type = vxge_offload_type(skb);
-
- if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- int mss = vxge_tcp_mss(skb);
- if (mss) {
- vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
- dev->name, __func__, __LINE__, mss);
- vxge_hw_fifo_txdl_mss_set(dtr, mss);
- } else {
- vxge_assert(skb->len <=
- dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
- vxge_assert(0);
- goto _exit1;
- }
- }
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vxge_hw_fifo_txdl_cksum_set_bits(dtr,
- VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
-
- vxge_hw_fifo_txdl_post(fifo_hw, dtr);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return NETDEV_TX_OK;
-
-_exit2:
- vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
-_exit1:
- j = 0;
- frag = &skb_shinfo(skb)->frags[0];
-
- dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (; j < i; j++) {
- dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-_exit0:
- netif_tx_stop_queue(fifo->txq);
- dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
-/*
- * vxge_rx_term
- *
- * Function will be called by the hw layer to abort all outstanding
- * receive descriptors.
- */
-static void
-vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv =
- vxge_hw_ring_rxd_private_get(dtrh);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (state != VXGE_HW_RXD_STATE_POSTED)
- return;
-
- dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb_data = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- ring->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_tx_term
- *
- * Function will be called to abort all outstanding tx descriptors
- */
-static void
-vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- skb_frag_t *frag;
- int i = 0, j, frg_cnt;
- struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
- struct sk_buff *skb = txd_priv->skb;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (state != VXGE_HW_TXDL_STATE_POSTED)
- return;
-
- /* check skb validity */
- vxge_assert(skb);
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- dev_kfree_skb(skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct list_head *entry, *next;
- u64 del_mac = 0;
- u8 *mac_address = (u8 *) (&del_mac);
-
- /* Copy the mac address to delete from the list */
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
- list_del(entry);
- kfree(entry);
- vpath->mac_addr_cnt--;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt--;
- return TRUE;
- }
- }
-
- return FALSE;
-}
-
-/* delete a mac address from DA table */
-static enum vxge_hw_status
-vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
- mac->macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config delete entry failed for vpath:%d",
- vpath->device_id);
- } else
- vxge_mac_list_del(vpath, mac);
- return status;
-}
-
-/**
- * vxge_set_multicast
- * @dev: pointer to the device structure
- *
- * Entry point for multicast address enable/disable
- * This function is a driver entry point which gets called by the kernel
- * whenever multicast addresses must be enabled/disabled. This also gets
- * called to set/reset promiscuous mode. Depending on the device flags, we
- * determine whether multicast addresses must be enabled or whether
- * promiscuous mode is to be disabled, etc.
- */
-static void vxge_set_multicast(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- struct vxgedev *vdev;
- int i, mcast_cnt = 0;
- struct vxge_vpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- int vpath_idx = 0;
- struct vxge_mac_addrs *mac_entry;
- struct list_head *list_head;
- struct list_head *entry, *next;
- u8 *mac_address = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return;
-
- if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to enable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 1;
- }
- } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_disable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to disable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 0;
- }
- }
-
- if (!vdev->config.addr_learn_en) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- if (dev->flags & IFF_PROMISC)
- status = vxge_hw_vpath_promisc_enable(
- vpath->handle);
- else
- status = vxge_hw_vpath_promisc_disable(
- vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to %s promisc"
- ", status %d", dev->flags&IFF_PROMISC ?
- "enable" : "disable", status);
- }
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- /* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- list_head = &vdev->vpaths[0].mac_addr_list;
- if ((netdev_mc_count(dev) +
- (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
- vdev->vpaths[0].max_mac_addr_cnt)
- goto _set_all_mcast;
-
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr)) {
- for (vpath_idx = 0; vpath_idx <
- vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(
- vdev,
- &mac_info);
- }
- }
- }
- }
-
- /* Add new ones */
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Setting individual"
- "multicast address failed",
- __func__, __LINE__);
- goto _set_all_mcast;
- }
- }
- }
-
- return;
-_set_all_mcast:
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr))
- break;
- }
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info);
- }
- }
-
- /* Enable all multicast */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling all multicasts failed",
- __func__, __LINE__);
- }
- vdev->all_multi_flg = 1;
- }
- dev->flags |= IFF_ALLMULTI;
- }
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_set_mac_addr
- * @dev: pointer to the device structure
- * @p: pointer to a sockaddr structure holding the new address
- *
- * Update entry "0" (default MAC addr)
- */
-static int vxge_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- struct vxgedev *vdev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info_new, mac_info_old;
- int vpath_idx = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memset(&mac_info_new, 0, sizeof(struct macInfo));
- memset(&mac_info_old, 0, sizeof(struct macInfo));
-
- /* Get the old address */
- memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
-
- /* Copy the new address */
- memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
-
- /* First delete the old mac address from all the vpaths,
- as we can't specify the index while adding a new mac address */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
- if (!vpath->is_open) {
- /* This can happen when this interface is added to or
- removed from a bonding interface. Delete this station
- address from the linked list */
- vxge_mac_list_del(vpath, &mac_info_old);
-
- /* Add this new address to the linked list
- for later restoring */
- vxge_mac_list_add(vpath, &mac_info_new);
-
- continue;
- }
- /* Delete the station address */
- mac_info_old.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info_old);
- }
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- eth_hw_addr_set(dev, addr->sa_data);
- return VXGE_HW_OK;
- }
-
- /* Set this mac address to all the vpaths */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- mac_info_new.vpath_no = vpath_idx;
- mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info_new);
- if (status != VXGE_HW_OK)
- return -EINVAL;
- }
-
- eth_hw_addr_set(dev, addr->sa_data);
-
- return status;
-}
-
-/*
- * vxge_vpath_intr_enable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to enable the interrupts
- *
- * Enables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id = 0;
- int tim_msix_id[4] = {0, 1, 0, 0};
- int alarm_msix_id = VXGE_ALARM_MSIX_ID;
-
- vxge_hw_vpath_intr_enable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
- else {
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- alarm_msix_id);
-
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
-
- /* enable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- }
-}
-
-/*
- * vxge_vpath_intr_disable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to disable the interrupts
- *
- * Disables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- struct __vxge_hw_device *hldev;
- int msix_id;
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
-
- vxge_hw_vpath_intr_disable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
- else {
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
-
- /* disable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- }
-}
-
- /* Search for a given mac address in the DA table */
-static enum vxge_hw_status
-vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- unsigned char macmask[ETH_ALEN];
- unsigned char macaddr[ETH_ALEN];
-
- status = vxge_hw_vpath_mac_addr_get(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config list entry failed for vpath:%d",
- vpath->device_id);
- return status;
- }
-
- while (!ether_addr_equal(mac->macaddr, macaddr)) {
- status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK)
- break;
- }
-
- return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- struct list_head *entry, *next;
-
- memset(&mac_info, 0, sizeof(struct macInfo));
-
- if (vpath->is_open) {
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- mac_address =
- (u8 *)&
- ((struct vxge_mac_addrs *)entry)->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- ((struct vxge_mac_addrs *)entry)->state =
- VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- /* does this mac address already exist in da table? */
- status = vxge_search_mac_addr_in_da_table(vpath,
- &mac_info);
- if (status != VXGE_HW_OK) {
- /* Add this mac address to the DA table */
- status = vxge_hw_vpath_mac_addr_add(
- vpath->handle, mac_info.macaddr,
- mac_info.macmask,
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA add entry failed for vpath:%d",
- vpath->device_id);
- ((struct vxge_mac_addrs *)entry)->state
- = VXGE_LL_MAC_ADDR_IN_LIST;
- }
- }
- }
- }
-
- return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status
-vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev = vpath->vdev;
- u16 vid;
-
- if (!vpath->is_open)
- return status;
-
- for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
- status = vxge_hw_vpath_vid_add(vpath->handle, vid);
-
- return status;
-}
-
-/*
- * vxge_reset_vpath
- * @vdev: pointer to vdev
- * @vp_id: vpath to reset
- *
- * Resets the vpath
-*/
-static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int ret = 0;
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is device reset already scheduled */
- if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
-
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(vpath->handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_from_reset"
- "failed for vpath:%d", vp_id);
- return status;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for"
- "vpath:%d", vp_id);
- return status;
- }
- } else
- return VXGE_HW_FAIL;
-
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- /* Enable all broadcast */
- vxge_hw_vpath_bcast_enable(vpath->handle);
-
- /* Enable all multicast */
- if (vdev->all_multi_flg) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
-
- /* Enable the interrupts */
- vxge_vpath_intr_enable(vdev, vp_id);
-
- smp_wmb();
-
- /* Enable the flow of traffic through the vpath */
- vxge_hw_vpath_enable(vpath->handle);
-
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- vpath->ring.last_status = VXGE_HW_OK;
-
- /* Vpath reset done */
- clear_bit(vp_id, &vdev->vp_reset);
-
- /* Start the vpath queue */
- if (netif_tx_queue_stopped(vpath->fifo.txq))
- netif_tx_wake_queue(vpath->fifo.txq);
-
- return ret;
-}
-
-/* Configure CI */
-static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
-{
- int i = 0;
-
- /* Enable CI for RTI */
- if (vdev->config.intr_type == MSI_X) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_ring *hw_ring;
-
- hw_ring = vdev->vpaths[i].ring.handle;
- vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
- }
- }
-
- /* Enable CI for TTI */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
- vxge_hw_vpath_tti_ci_set(hw_fifo);
- /*
- * For INTA (with or without NAPI), set CI on only one
- * vpath, since there is only one free-running timer.
- */
- if ((vdev->config.intr_type == INTA) && (i == 0))
- break;
- }
-}
-
-static int do_vxge_reset(struct vxgedev *vdev, int event)
-{
- int ret = 0, vp_id, i;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is reset already scheduled */
- if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- netif_carrier_off(vdev->ndev);
-
- /* wait for all the vpath resets to complete */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- while (test_bit(vp_id, &vdev->vp_reset))
- msleep(50);
- }
-
- netif_carrier_on(vdev->ndev);
-
- /* if execution mode is set to debug, don't reset the adapter */
- if (unlikely(vdev->exec_mode)) {
- vxge_debug_init(VXGE_ERR,
- "%s: execution mode is debug, returning..",
- vdev->ndev->name);
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- netif_tx_stop_all_queues(vdev->ndev);
- return 0;
- }
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- vxge_hw_device_wait_receive_idle(vdev->devh);
- vxge_hw_device_intr_disable(vdev->devh);
-
- switch (vdev->cric_err_event) {
- case VXGE_HW_EVENT_UNKNOWN:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
- "fatal: %s: Disabling device due to"
- "unknown error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_RESET_START:
- break;
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- case VXGE_HW_EVENT_ALARM_CLEARED:
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- break;
- case VXGE_HW_EVENT_CRITICAL_ERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
- "fatal: %s: Disabling device due to"
- "serious error",
- vdev->ndev->name);
- /* SOP or device reset required */
- /* This event is not currently used */
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
- "fatal: %s: Disabling device due to"
- "serious error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
- "fatal: %s: Disabling device due to"
- "slot freeze",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- default:
- break;
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
- netif_tx_stop_all_queues(vdev->ndev);
-
- if (event == VXGE_LL_FULL_RESET)
- vxge_reset_all_vpaths(vdev);
-
- if (event == VXGE_LL_COMPL_RESET) {
- for (i = 0; i < vdev->no_of_vpath; i++)
- if (vdev->vpaths[i].handle) {
- if (vxge_hw_vpath_recover_from_reset(
- vdev->vpaths[i].handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- ret = -EPERM;
- goto out;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- ret = -EPERM;
- goto out;
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
- /* Reprogram the DA table with populated mac addresses */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
- vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
- }
-
- /* enable vpath interrupts */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_vpath_intr_enable(vdev, i);
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- /* Indicate card up */
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Get the traffic to flow through the vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_enable(vdev->vpaths[i].handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
- }
-
- netif_tx_wake_all_queues(vdev->ndev);
- }
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-
- /* Indicate reset done */
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
- return ret;
-}
-
-/*
- * vxge_reset
- * @vdev: pointer to ll device
- *
- * The driver may reset the chip on events such as SERR, ECC errors, etc.
- */
-static void vxge_reset(struct work_struct *work)
-{
- struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
-
- if (!netif_running(vdev->ndev))
- return;
-
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-}
-
-/**
- * vxge_poll_msix - Receive handler when Receive Polling is used.
- * @napi: pointer to the napi structure.
- * @budget: Number of packets budgeted to be processed in this iteration.
- *
- * This function comes into the picture only if the receive side is being
- * handled through polling (called NAPI in Linux). It mostly does what the
- * normal Rx interrupt handler does in terms of descriptor and packet
- * processing, but not in an interrupt context. It also processes at most
- * a specified number of packets in one iteration. This value is passed
- * down by the kernel as the function argument 'budget'.
- */
-static int vxge_poll_msix(struct napi_struct *napi, int budget)
-{
- struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
- int pkts_processed;
- int budget_org = budget;
-
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed = ring->pkts_processed;
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
-
- /* Re-enable the Rx interrupts for the vpath */
- vxge_hw_channel_msix_unmask(
- (struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
- }
-
- /* Copy and return the local variable in case the interrupt fires
- * again right after the MSI-X unmask above and preempts this NAPI
- * thread */
- return pkts_processed;
-}
-
-static int vxge_poll_inta(struct napi_struct *napi, int budget)
-{
- struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
- int pkts_processed = 0;
- int i;
- int budget_org = budget;
- struct vxge_ring *ring;
-
- struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed += ring->pkts_processed;
- budget -= ring->pkts_processed;
- if (budget <= 0)
- break;
- }
-
- VXGE_COMPLETE_ALL_TX(vdev);
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /* Re-enable the Rx interrupts for the ring */
- vxge_hw_device_unmask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- }
-
- return pkts_processed;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * vxge_netpoll - netpoll event handler entry point
- * @dev : pointer to the device structure.
- * Description:
- * This function will be called by the upper layer to check for events on
- * the interface in situations where interrupts are disabled. It is used
- * for specific in-kernel networking tasks, such as remote consoles and
- * kernel debugging over the network (for example, netdump in Red Hat).
- */
-static void vxge_netpoll(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct pci_dev *pdev = vdev->pdev;
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- const int irq = pdev->irq;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (pci_channel_offline(pdev))
- return;
-
- disable_irq(irq);
- vxge_hw_device_clear_tx_rx(hldev);
- VXGE_COMPLETE_ALL_RX(vdev);
- VXGE_COMPLETE_ALL_TX(vdev);
-
- enable_irq(irq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-#endif
-
-/* RTH configuration */
-static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_rth_hash_types hash_types;
- u8 itable[256] = {0}; /* indirection table */
- u8 mtable[256] = {0}; /* CPU to vpath mapping */
- int index;
-
- /*
- * Filling
- * - itable with bucket numbers
- * - mtable with bucket-to-vpath mapping
- */
- for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
- itable[index] = index;
- mtable[index] = index % vdev->no_of_vpath;
- }
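- /*
- * Example: with rth_bkt_sz == 2 and three vpaths this yields
- * itable = {0, 1, 2, 3} and mtable = {0, 1, 2, 0}.
- */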
-
- /* set indirection table, bucket-to-vpath mapping */
- status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
- vdev->no_of_vpath,
- mtable, itable,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH indirection table configuration failed "
- "for vpath:%d", vdev->vpaths[0].device_id);
- return status;
- }
-
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
- /*
- * Because the itable_set() method uses the active_table field
- * for the target virtual path the RTH config should be updated
- * for all VPATHs. The h/w only uses the lowest numbered VPATH
- * when steering frames.
- */
- for (index = 0; index < vdev->no_of_vpath; index++) {
- status = vxge_hw_vpath_rts_rth_set(
- vdev->vpaths[index].handle,
- vdev->config.rth_algorithm,
- &hash_types,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH configuration failed for vpath:%d",
- vdev->vpaths[index].device_id);
- return status;
- }
- }
-
- return status;
-}
-
-/* reset vpaths */
-static void vxge_reset_all_vpaths(struct vxgedev *vdev)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(
- vpath->handle) != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- return;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- return;
- }
- }
- }
-}
-
-/* close vpaths */
-static void vxge_close_vpaths(struct vxgedev *vdev, int index)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = index; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- if (vpath->handle && vpath->is_open) {
- vxge_hw_vpath_close(vpath->handle);
- vdev->stats.vpaths_open--;
- }
- vpath->is_open = 0;
- vpath->handle = NULL;
- }
-}
-
-/* open vpaths */
-static int vxge_open_vpaths(struct vxgedev *vdev)
-{
- struct vxge_hw_vpath_attr attr;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath;
- u32 vp_id = 0;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_configured);
-
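- /* Titan 1A devices get retuned interrupt-moderation
- * parameters (urange/UFC thresholds and timer values)
- * before the vpath is opened.
- */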
- if (!vdev->titan1) {
- struct vxge_hw_vp_config *vcfg;
- vcfg = &vdev->devh->config.vp_config[vpath->device_id];
-
- vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
- vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
- vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
- vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
- vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
- vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
- vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
- vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
- vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
- }
-
- attr.vp_id = vpath->device_id;
- attr.fifo_attr.callback = vxge_xmit_compl;
- attr.fifo_attr.txdl_term = vxge_tx_term;
- attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
- attr.fifo_attr.userdata = &vpath->fifo;
-
- attr.ring_attr.callback = vxge_rx_1b_compl;
- attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
- attr.ring_attr.rxd_term = vxge_rx_term;
- attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
- attr.ring_attr.userdata = &vpath->ring;
-
- vpath->ring.ndev = vdev->ndev;
- vpath->ring.pdev = vdev->pdev;
-
- status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
- if (status == VXGE_HW_OK) {
- vpath->fifo.handle =
- (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
- vpath->ring.handle =
- (struct __vxge_hw_ring *)attr.ring_attr.userdata;
- vpath->fifo.tx_steering_type =
- vdev->config.tx_steering_type;
- vpath->fifo.ndev = vdev->ndev;
- vpath->fifo.pdev = vdev->pdev;
-
- u64_stats_init(&vpath->fifo.stats.syncp);
- u64_stats_init(&vpath->ring.stats.syncp);
-
- if (vdev->config.tx_steering_type)
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, i);
- else
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, 0);
- vpath->fifo.indicate_max_pkts =
- vdev->config.fifo_indicate_max_pkts;
- vpath->fifo.tx_vector_no = 0;
- vpath->ring.rx_vector_no = 0;
- vpath->ring.rx_hwts = vdev->rx_hwts;
- vpath->is_open = 1;
- vdev->vp_handles[i] = vpath->handle;
- vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
- vdev->stats.vpaths_open++;
- } else {
- vdev->stats.vpath_open_fail++;
- vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
- "open with status: %d",
- vdev->ndev->name, vpath->device_id,
- status);
- vxge_close_vpaths(vdev, 0);
- return -EPERM;
- }
-
- vp_id = vpath->handle->vpath->vp_id;
- vdev->vpaths_deployed |= vxge_mBIT(vp_id);
- }
-
- return VXGE_HW_OK;
-}
-
-/**
- * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @fifo: pointer to transmit fifo structure
- * Description: The function changes the boundary timer and restriction
- * timer values depending on the traffic.
- * Return Value: None
- */
-static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
-{
- fifo->interrupt_count++;
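- /* Re-evaluate roughly every 10 ms (HZ / 100 jiffies): under a
- * heavy interrupt rate stretch the restriction timer to its
- * adaptive value, otherwise let it fall back to zero.
- */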
- if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_fifo *hw_fifo = fifo->handle;
-
- fifo->jiffies = jiffies;
- if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
- hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
- hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- } else if (hw_fifo->rtimer != 0) {
- hw_fifo->rtimer = 0;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- }
- fifo->interrupt_count = 0;
- }
-}
-
-/**
- * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @ring: pointer to receive ring structure
- * Description: The function increases or decreases the packet counts within
- * the ranges of traffic utilization, if the interrupts due to this ring are
- * not within a fixed range.
- * Return Value: None
- */
-static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
-{
- ring->interrupt_count++;
- if (time_before(ring->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_ring *hw_ring = ring->handle;
-
- ring->jiffies = jiffies;
- if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
- hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
- hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- } else if (hw_ring->rtimer != 0) {
- hw_ring->rtimer = 0;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- }
- ring->interrupt_count = 0;
- }
-}
-
-/*
- * vxge_isr_napi
- * @irq: the irq of the device.
- * @dev_id: a void pointer to the hldev structure of the Titan device
- * @ptregs: pointer to the registers pushed on the stack.
- *
- * This function is the ISR handler of the device when napi is enabled. It
- * identifies the reason for the interrupt and calls the relevant service
- * routines.
- */
-static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
-{
- struct __vxge_hw_device *hldev;
- u64 reason;
- enum vxge_hw_status status;
- struct vxgedev *vdev = (struct vxgedev *)dev_id;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (pci_channel_offline(vdev->pdev))
- return IRQ_NONE;
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return IRQ_HANDLED;
-
- status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
- if (status == VXGE_HW_OK) {
- vxge_hw_device_mask_all(hldev);
-
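- /* Traffic interrupts from the vpaths this device has deployed
- * schedule the NAPI poll; any other reason is simply unmasked
- * again.
- */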
- if (reason &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
- vdev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
-
- vxge_hw_device_clear_tx_rx(hldev);
- napi_schedule(&vdev->napi);
- vxge_debug_intr(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_HANDLED;
- } else
- vxge_hw_device_unmask_all(hldev);
- } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
- (status == VXGE_HW_ERR_CRITICAL) ||
- (status == VXGE_HW_ERR_FIFO))) {
- vxge_hw_device_mask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- return IRQ_HANDLED;
- } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
- return IRQ_HANDLED;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_NONE;
-}
-
-static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
-
- adaptive_coalesce_tx_interrupts(fifo);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- VXGE_COMPLETE_VPATH_TX(fifo);
-
- vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
-{
- struct vxge_ring *ring = (struct vxge_ring *)dev_id;
-
- adaptive_coalesce_rx_interrupts(ring);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- napi_schedule(&ring->napi);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-vxge_alarm_msix_handle(int irq, void *dev_id)
-{
- int i;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
- struct vxgedev *vdev = vpath->vdev;
- int msix_id = (vpath->handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- /* Reduce the chance of losing alarm interrupts by masking
- * the vector. A pending bit will be set if an alarm is
- * generated, and the interrupt will fire on unmask.
- */
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
- vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
-
- status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
- vdev->exec_mode);
- if (status == VXGE_HW_OK) {
- vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
- continue;
- }
- vxge_debug_intr(VXGE_ERR,
- "%s: vxge_hw_vpath_alarm_process failed %x ",
- VXGE_DRIVER_NAME, status);
- }
- return IRQ_HANDLED;
-}
-
-static int vxge_alloc_msix(struct vxgedev *vdev)
-{
- int j, i, ret = 0;
- int msix_intr_vect = 0, temp;
- vdev->intr_cnt = 0;
-
-start:
- /* Tx/Rx MSIX Vectors count */
- vdev->intr_cnt = vdev->no_of_vpath * 2;
-
- /* Alarm MSIX Vectors count */
- vdev->intr_cnt++;
-
- vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!vdev->entries) {
- vxge_debug_init(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_entries_failed;
- }
-
- vdev->vxge_entries = kcalloc(vdev->intr_cnt,
- sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
- if (!vdev->vxge_entries) {
- vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_vxge_entries_failed;
- }
-
- for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
-
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
-
- /* Initialize the fifo vector */
- vdev->entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].in_use = 0;
- j++;
-
- /* Initialize the ring vector */
- vdev->entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].in_use = 0;
- j++;
- }
-
- /* Initialize the alarm vector */
- vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].in_use = 0;
-
- ret = pci_enable_msix_range(vdev->pdev,
- vdev->entries, 3, vdev->intr_cnt);
- if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
- } else if (ret < vdev->intr_cnt) {
- pci_disable_msix(vdev->pdev);
-
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if (max_config_vpath != VXGE_USE_DEFAULT) {
- ret = -ENODEV;
- goto enable_msix_failed;
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
- /* Retry with fewer vectors by reducing the number of vpaths */
- temp = (ret - 1)/2;
- vxge_close_vpaths(vdev, temp);
- vdev->no_of_vpath = temp;
- goto start;
- }
- return 0;
-
-enable_msix_failed:
- kfree(vdev->vxge_entries);
-alloc_vxge_entries_failed:
- kfree(vdev->entries);
-alloc_entries_failed:
- return ret;
-}
-
-static int vxge_enable_msix(struct vxgedev *vdev)
-{
- int i, ret = 0;
- /* 0 - Tx, 1 - Rx */
- int tim_msix_id[4] = {0, 1, 0, 0};
-
- vdev->intr_cnt = 0;
-
- /* allocate msix vectors */
- ret = vxge_alloc_msix(vdev);
- if (!ret) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct vxge_vpath *vpath = &vdev->vpaths[i];
-
- /* If the fifo or ring is not enabled, its MSI-X vector
- * should be set to 0.
- */
- vpath->ring.rx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
-
- vpath->fifo.tx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE);
-
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- VXGE_ALARM_MSIX_ID);
- }
- }
-
- return ret;
-}
-
-static void vxge_rem_msix_isr(struct vxgedev *vdev)
-{
- int intr_cnt;
-
- for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
- intr_cnt++) {
- if (vdev->vxge_entries[intr_cnt].in_use) {
- free_irq(vdev->entries[intr_cnt].vector,
- vdev->vxge_entries[intr_cnt].arg);
- vdev->vxge_entries[intr_cnt].in_use = 0;
- }
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
-
- if (vdev->config.intr_type == MSI_X)
- pci_disable_msix(vdev->pdev);
-}
-
-static void vxge_rem_isr(struct vxgedev *vdev)
-{
- if (IS_ENABLED(CONFIG_PCI_MSI) &&
- vdev->config.intr_type == MSI_X) {
- vxge_rem_msix_isr(vdev);
- } else if (vdev->config.intr_type == INTA) {
- free_irq(vdev->pdev->irq, vdev);
- }
-}
-
-static int vxge_add_isr(struct vxgedev *vdev)
-{
- int ret = 0;
- int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
- int pci_fun = PCI_FUNC(vdev->pdev->devfn);
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
- ret = vxge_enable_msix(vdev);
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
- vdev->config.intr_type = INTA;
- }
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
- for (intr_idx = 0;
- intr_idx < (vdev->no_of_vpath *
- VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
-
- msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
- irq_req = 0;
-
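- /* Even vectors within a vpath (msix_idx 0) carry Tx and odd
- * vectors (msix_idx 1) carry Rx for the vpath currently
- * selected by vp_idx.
- */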
- switch (msix_idx) {
- case 0:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_tx_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].fifo;
- irq_req = 1;
- break;
- case 1:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].ring;
- irq_req = 1;
- break;
- }
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- if (irq_req) {
- /* We requested this MSI-X interrupt */
- vdev->vxge_entries[intr_cnt].in_use = 1;
- msix_idx += vdev->vpaths[vp_idx].device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(
- vdev->vpaths[vp_idx].handle,
- msix_idx);
- intr_cnt++;
- }
-
- /* Point to next vpath handler */
- if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
- (vp_idx < (vdev->no_of_vpath - 1)))
- vp_idx++;
- }
-
- intr_cnt = vdev->no_of_vpath * 2;
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Alarm - fn:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun);
- /* For Alarm interrupts */
- ret = request_irq(vdev->entries[intr_cnt].vector,
- vxge_alarm_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[0]);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- msix_idx);
- vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
- }
-
-INTA_MODE:
- if (vdev->config.intr_type == INTA) {
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
- "%s:vxge:INTA", vdev->ndev->name);
- vxge_hw_device_set_intr_type(vdev->devh,
- VXGE_HW_INTR_MODE_IRQLINE);
-
- vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
-
- ret = request_irq((int) vdev->pdev->irq,
- vxge_isr_napi,
- IRQF_SHARED, vdev->desc[0], vdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s %s-%d: ISR registration failed",
- VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
- return -ENODEV;
- }
- vxge_debug_init(VXGE_TRACE,
- "new %s-%d line allocated",
- "IRQ", vdev->pdev->irq);
- }
-
- return VXGE_HW_OK;
-}
-
-static void vxge_poll_vp_reset(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
- int i, j = 0;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- if (test_bit(i, &vdev->vp_reset)) {
- vxge_reset_vpath(vdev, i);
- j++;
- }
- }
- if (j && (vdev->config.intr_type != MSI_X)) {
- vxge_hw_device_unmask_all(vdev->devh);
- vxge_hw_device_flush_io(vdev->devh);
- }
-
- mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
-}
-
-static void vxge_poll_vp_lockup(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- struct vxge_ring *ring;
- int i;
- unsigned long rx_frms;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
-
- /* Frame count, truncated to machine word size */
- rx_frms = READ_ONCE(ring->stats.rx_frms);
-
- /* Did this vpath receive any packets? */
- if (ring->stats.prev_rx_frms == rx_frms) {
- status = vxge_hw_vpath_check_leak(ring->handle);
-
- /* Did it receive any packets last time? */
- if ((VXGE_HW_FAIL == status) &&
- (VXGE_HW_FAIL == ring->last_status)) {
-
- /* schedule vpath reset */
- if (!test_and_set_bit(i, &vdev->vp_reset)) {
- vpath = &vdev->vpaths[i];
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, i);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- continue;
- }
- }
- }
- ring->stats.prev_rx_frms = rx_frms;
- ring->last_status = status;
- }
-
- /* Check every millisecond */
- mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
-}
-
-static netdev_features_t vxge_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- netdev_features_t changed = dev->features ^ features;
-
- /* Enabling RTH requires some of the logic in vxge_device_register and a
- * vpath reset. Due to these restrictions, only allow modification
- * while the interface is down.
- */
- if ((changed & NETIF_F_RXHASH) && netif_running(dev))
- features ^= NETIF_F_RXHASH;
-
- return features;
-}
-
-static int vxge_set_features(struct net_device *dev, netdev_features_t features)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- netdev_features_t changed = dev->features ^ features;
-
- if (!(changed & NETIF_F_RXHASH))
- return 0;
-
- /* !netif_running() ensured by vxge_fix_features() */
-
- vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
- vxge_reset_all_vpaths(vdev);
-
- return 0;
-}
-
-/**
- * vxge_open
- * @dev: pointer to the device structure.
- *
- * This function is the open entry point of the driver. It mainly calls a
- * function to allocate Rx buffers and inserts them into the buffer
- * descriptors and then enables the Rx part of the NIC.
- * Return value: '0' on success and an appropriate negative errno
- * value on failure.
- */
-static int vxge_open(struct net_device *dev)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- struct vxge_vpath *vpath;
- int ret = 0;
- int i;
- u64 val64;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
-	/* make sure the link is off by default every time the NIC is
-	 * initialized */
- netif_carrier_off(dev);
-
- /* Open VPATHs */
- status = vxge_open_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: Vpath open failed", vdev->ndev->name);
- ret = -EPERM;
- goto out0;
- }
-
- vdev->mtu = dev->mtu;
-
- status = vxge_add_isr(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: ISR add failed", dev->name);
- ret = -EPERM;
- goto out1;
- }
-
- if (vdev->config.intr_type != MSI_X) {
- netif_napi_add_weight(dev, &vdev->napi, vxge_poll_inta,
- vdev->config.napi_weight);
- napi_enable(&vdev->napi);
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vpath->ring.napi_p = &vdev->napi;
- }
- } else {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- netif_napi_add_weight(dev, &vpath->ring.napi,
- vxge_poll_msix,
- vdev->config.napi_weight);
- napi_enable(&vpath->ring.napi);
- vpath->ring.napi_p = &vpath->ring.napi;
- }
- }
-
- /* configure RTH */
- if (vdev->config.rth_steering) {
- status = vxge_rth_configure(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: RTH configuration failed",
- dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
- printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
- hldev->config.rth_en ? "enabled" : "disabled");
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- /* set initial mtu before enabling the device */
- status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: can not set new MTU", dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
-
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
- vxge_debug_init(vdev->level_trace,
- "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
-
- /* Restore the DA, VID table and also multicast and promiscuous mode
- * states
- */
- if (vdev->all_multi_flg) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
- }
-
-	/* Enable vpaths to sniff all unicast/multicast traffic that is not
-	 * addressed to them. We allow promiscuous mode for the PF only.
-	 */
-
- val64 = 0;
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- val64);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- val64);
-
- vxge_set_multicast(dev);
-
-	/* Enable bcast and mcast for all vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- status = vxge_hw_vpath_bcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable bcast for vpath "
- "id %d", dev->name, i);
- if (vdev->config.addr_learn_en) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable mcast for vpath "
- "id %d", dev->name, i);
- }
- }
-
- vxge_hw_device_setpause_data(vdev->devh, 0,
- vdev->config.tx_pause_enable,
- vdev->config.rx_pause_enable);
-
- if (vdev->vp_reset_timer.function == NULL)
- vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
- HZ / 2);
-
- /* There is no need to check for RxD leak and RxD lookup on Titan1A */
- if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
- HZ / 2);
-
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- smp_wmb();
-
- if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
- netif_carrier_on(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
- }
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- vxge_hw_vpath_enable(vpath->handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- }
-
- netif_tx_start_all_queues(vdev->ndev);
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
- goto out0;
-
-out2:
- vxge_rem_isr(vdev);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
-out1:
- vxge_close_vpaths(vdev, 0);
-out0:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return ret;
-}
-
-/* Loop through the mac address list and delete all the entries */
-static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
-{
-	struct list_head *entry, *next;
-
-	if (list_empty(&vpath->mac_addr_list))
- return;
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- list_del(entry);
- kfree(entry);
- }
-}
-
-static void vxge_napi_del_all(struct vxgedev *vdev)
-{
- int i;
-
-	if (vdev->config.intr_type != MSI_X)
- netif_napi_del(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- netif_napi_del(&vdev->vpaths[i].ring.napi);
- }
-}
-
-static int do_vxge_close(struct net_device *dev, int do_io)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- int i;
- u64 val64, vpath_vector;
-
-	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* If vxge_handle_crit_err task is executing,
- * wait till it completes. */
- while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- msleep(50);
-
- if (do_io) {
- /* Put the vpath back in normal mode */
- vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
- status = vxge_hw_mgmt_reg_read(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- &val64);
- if (status == VXGE_HW_OK) {
- val64 &= ~vpath_vector;
- status = vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- val64);
- }
-
- /* Remove the function 0 from promiscuous mode */
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- 0);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- 0);
-
- smp_wmb();
- }
-
- if (vdev->titan1)
- del_timer_sync(&vdev->vp_lockup_timer);
-
- del_timer_sync(&vdev->vp_reset_timer);
-
- if (do_io)
- vxge_hw_device_wait_receive_idle(hldev);
-
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
- netif_carrier_off(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Down\n");
- netif_tx_stop_all_queues(vdev->ndev);
-
- /* Note that at this point xmit() is stopped by upper layer */
- if (do_io)
- vxge_hw_device_intr_disable(vdev->devh);
-
- vxge_rem_isr(vdev);
-
- vxge_napi_del_all(vdev);
-
- if (do_io)
- vxge_reset_all_vpaths(vdev);
-
- vxge_close_vpaths(vdev, 0);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
-
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
-
- return 0;
-}
-
-/**
- * vxge_close
- * @dev: device pointer.
- *
- * This is the stop entry point of the driver. It needs to undo exactly
- * what the open entry point did, and is thus usually referred to as the
- * close function. Among other things, it stops the Rx side of the NIC
- * and frees all the Rx buffers in the Rx rings.
- * Return value: '0' on success and an appropriate negative errno value
- * on failure.
- */
-static int vxge_close(struct net_device *dev)
-{
- do_vxge_close(dev, 1);
- return 0;
-}
-
-/**
- * vxge_change_mtu
- * @dev: net device pointer.
- * @new_mtu :the new MTU size for the device.
- *
- * A driver entry point to change MTU size for the device. Before changing
- * the MTU the device must be stopped.
- */
-static int vxge_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d", __func__, __LINE__);
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev))) {
-		/* just store the new value; it will be used later in open() */
- dev->mtu = new_mtu;
- vxge_debug_init(vdev->level_err,
- "%s", "device is down on MTU change");
- return 0;
- }
-
- vxge_debug_init(vdev->level_trace,
- "trying to apply new MTU %d", new_mtu);
-
- if (vxge_close(dev))
- return -EIO;
-
- dev->mtu = new_mtu;
- vdev->mtu = new_mtu;
-
- if (vxge_open(dev))
- return -EIO;
-
- vxge_debug_init(vdev->level_trace,
- "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d Exiting...", __func__, __LINE__);
-
- return 0;
-}
-
-/**
- * vxge_get_stats64
- * @dev: pointer to the device structure
- * @net_stats: pointer to struct rtnl_link_stats64
- *
- */
-static void
-vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- int k;
-
- /* net_stats already zeroed by caller */
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
- struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
- unsigned int start;
- u64 packets, bytes, multicast;
-
- do {
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
-
- packets = rxstats->rx_frms;
- multicast = rxstats->rx_mcast;
- bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
-
- net_stats->rx_packets += packets;
- net_stats->rx_bytes += bytes;
- net_stats->multicast += multicast;
-
- net_stats->rx_errors += rxstats->rx_errors;
- net_stats->rx_dropped += rxstats->rx_dropped;
-
- do {
- start = u64_stats_fetch_begin_irq(&txstats->syncp);
-
- packets = txstats->tx_frms;
- bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
-
- net_stats->tx_packets += packets;
- net_stats->tx_bytes += bytes;
- net_stats->tx_errors += txstats->tx_errors;
- }
-}
-
-static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
-{
- enum vxge_hw_status status;
- u64 val64;
-
- /* Timestamp is passed to the driver via the FCS, therefore we
- * must disable the FCS stripping by the adapter. Since this is
- * required for the driver to load (due to a hardware bug),
- * there is no need to do anything special here.
- */
- val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
- VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
- VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
-
- status = vxge_hw_mgmt_reg_write(devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- offsetof(struct vxge_hw_mrpcim_reg,
- xmac_timestamp),
- val64);
- vxge_hw_device_flush_io(devh);
- devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
- return status;
-}
-
-static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
- int i;
-
- if (copy_from_user(&config, data, sizeof(config)))
- return -EFAULT;
-
- /* Transmit HW Timestamp not supported */
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- vdev->rx_hwts = 0;
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- break;
-
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
- return -EFAULT;
-
- vdev->rx_hwts = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
-
- default:
- return -ERANGE;
- }
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (vdev->rx_hwts ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * vxge_ioctl
- * @dev: Device pointer.
- * @rq: An IOCTL-specific structure that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
- * @cmd: This is used to distinguish between the different commands that
- * can be passed to the IOCTL functions.
- *
- * Entry point for the Ioctl.
- */
-static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return vxge_hwtstamp_set(vdev, rq->ifr_data);
- case SIOCGHWTSTAMP:
- return vxge_hwtstamp_get(vdev, rq->ifr_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * vxge_tx_watchdog
- * @dev: pointer to net device structure
- * @txqueue: index of the hanging queue
- *
- * Watchdog for transmit side.
- * This function is triggered if the Tx Queue is stopped
- * for a pre-defined amount of time when the Interface is still up.
- */
-static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
-{
- struct vxgedev *vdev;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
-
- schedule_work(&vdev->reset_task);
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_vlan_rx_add_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Add the vlan id to the device's vlan id table
- */
-static int
-vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
-	/* Add this vlan id to the vid table of each open vpath */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_add(vpath->handle, vid);
- }
- set_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-/**
- * vxge_vlan_rx_kill_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Remove the vlan id from the device's vlan id table
- */
-static int
-vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- /* Delete this vlan from the vid table */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_delete(vpath->handle, vid);
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- clear_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-static const struct net_device_ops vxge_netdev_ops = {
- .ndo_open = vxge_open,
- .ndo_stop = vxge_close,
- .ndo_get_stats64 = vxge_get_stats64,
- .ndo_start_xmit = vxge_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = vxge_set_multicast,
- .ndo_eth_ioctl = vxge_ioctl,
- .ndo_set_mac_address = vxge_set_mac_addr,
- .ndo_change_mtu = vxge_change_mtu,
- .ndo_fix_features = vxge_fix_features,
- .ndo_set_features = vxge_set_features,
- .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
- .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
- .ndo_tx_timeout = vxge_tx_watchdog,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = vxge_netpoll,
-#endif
-};
-
-static int vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int no_of_vpath, struct vxgedev **vdev_out)
-{
- struct net_device *ndev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev;
- int ret = 0, no_of_queue = 1;
- u64 stat;
-
- *vdev_out = NULL;
- if (config->tx_steering_type)
- no_of_queue = no_of_vpath;
-
- ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
- no_of_queue);
- if (ndev == NULL) {
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
- "%s : device allocation failed", __func__);
- ret = -ENODEV;
- goto _out0;
- }
-
- vxge_debug_entryexit(
- vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Entering...",
- ndev->name, __func__, __LINE__);
-
- vdev = netdev_priv(ndev);
- memset(vdev, 0, sizeof(struct vxgedev));
-
- vdev->ndev = ndev;
- vdev->devh = hldev;
- vdev->pdev = hldev->pdev;
- memcpy(&vdev->config, config, sizeof(struct vxge_config));
- vdev->rx_hwts = 0;
- vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
-
- SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
-
- ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_HW_VLAN_CTAG_TX;
- if (vdev->config.rth_steering != NO_STEERING)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->features |= ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
-
- ndev->netdev_ops = &vxge_netdev_ops;
-
- ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- INIT_WORK(&vdev->reset_task, vxge_reset);
-
- vxge_initialize_ethtool_ops(ndev);
-
- /* Allocate memory for vpath */
- vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
- GFP_KERNEL);
- if (!vdev->vpaths) {
- vxge_debug_init(VXGE_ERR,
- "%s: vpath memory allocation failed",
- vdev->ndev->name);
- ret = -ENOMEM;
- goto _out1;
- }
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s : checksumming enabled", __func__);
-
- ndev->features |= NETIF_F_HIGHDMA;
-
- /* MTU range: 68 - 9600 */
- ndev->min_mtu = VXGE_HW_MIN_MTU;
- ndev->max_mtu = VXGE_HW_MAX_MTU;
-
- ret = register_netdev(ndev);
- if (ret) {
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: %s : device registration failed!",
- ndev->name, __func__);
- goto _out2;
- }
-
-	/* Set the address length; the factory-defined MAC address itself is
-	 * assigned later in vxge_probe() */
-	ndev->addr_len = ETH_ALEN;
-
-	/* Leave the link state off at this point; when the link change
-	 * interrupt arrives, the state will automatically be set to the
-	 * right value.
-	 */
- netif_carrier_off(ndev);
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: Ethernet device registered",
- ndev->name);
-
- hldev->ndev = ndev;
- *vdev_out = vdev;
-
- /* Resetting the Device stats */
- status = vxge_hw_mrpcim_stats_access(
- hldev,
- VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
- 0,
- 0,
- &stat);
-
- if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
-			"%s: device stats clear returns "
-			"VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);
-
- vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Exiting...",
- ndev->name, __func__, __LINE__);
-
- return ret;
-_out2:
- kfree(vdev->vpaths);
-_out1:
- free_netdev(ndev);
-_out0:
- return ret;
-}
-
-/*
- * vxge_device_unregister
- *
- * This function will unregister and free network device
- */
-static void vxge_device_unregister(struct __vxge_hw_device *hldev)
-{
- struct vxgedev *vdev;
- struct net_device *dev;
- char buf[IFNAMSIZ];
-
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
- __func__, __LINE__);
-
- strlcpy(buf, dev->name, IFNAMSIZ);
-
- flush_work(&vdev->reset_task);
-
-	/* unregister_netdev() will call ndo_stop() if the device is up */
- unregister_netdev(dev);
-
- kfree(vdev->vpaths);
-
- vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
- buf);
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
- __func__, __LINE__);
-
- /* we are safe to free it now */
- free_netdev(dev);
-}
-
-/*
- * vxge_callback_crit_err
- *
- * This function is called by the alarm handler in interrupt context.
- * Driver must analyze it based on the event type.
- */
-static void
-vxge_callback_crit_err(struct __vxge_hw_device *hldev,
- enum vxge_hw_event type, u64 vp_id)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath = NULL;
- int vpath_idx;
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
-
- /* Note: This event type should be used for device wide
- * indications only - Serious errors, Slot freeze and critical errors
- */
- vdev->cric_err_event = type;
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->device_id == vp_id)
- break;
- }
-
- if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
- if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
- vxge_debug_init(VXGE_ERR,
- "%s: Slot is frozen", vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_SERR) {
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Serious Error",
- vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Critical Error",
- vdev->ndev->name);
- }
-
- if ((type == VXGE_HW_EVENT_SERR) ||
- (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
- vxge_hw_device_mask_all(hldev);
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
- (type == VXGE_HW_EVENT_VPATH_ERR)) {
-
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- else {
- /* check if this vpath is already set for reset */
- if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, vpath_idx);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- }
- }
- }
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-}
-
-static void verify_bandwidth(void)
-{
- int i, band_width, total = 0, equal_priority = 0;
-
- /* 1. If user enters 0 for some fifo, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0) {
- equal_priority = 1;
- break;
- }
- }
-
- if (!equal_priority) {
- /* 2. If sum exceeds 100, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0xFF)
- break;
-
- total += bw_percentage[i];
- if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
- equal_priority = 1;
- break;
- }
- }
- }
-
- if (!equal_priority) {
- /* Is all the bandwidth consumed? */
- if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
- if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
-				/* Split rest of bw equally among remaining VPs */
- band_width =
- (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
- (VXGE_HW_MAX_VIRTUAL_PATHS - i);
- if (band_width < 2) /* min of 2% */
- equal_priority = 1;
- else {
- for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
- i++)
- bw_percentage[i] =
- band_width;
- }
- }
- } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
- equal_priority = 1;
- }
-
- if (equal_priority) {
- vxge_debug_init(VXGE_ERR,
- "%s: Assigning equal bandwidth to all the vpaths",
- VXGE_DRIVER_NAME);
- bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
- VXGE_HW_MAX_VIRTUAL_PATHS;
- for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- bw_percentage[i] = bw_percentage[0];
- }
-}
-
-/*
- * Vpath configuration
- */
-static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
- u64 vpath_mask, struct vxge_config *config_param)
-{
- int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
- u32 txdl_size, txdl_per_memblock;
-
- temp = driver_config->vpath_per_dev;
- if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
- (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
-		/* No more CPUs. Return vpath count as zero. */
- if (driver_config->g_no_cpus == -1)
- return 0;
-
- if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus =
- netif_get_num_default_rss_queues();
-
- driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
- if (!driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = 1;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- if (vxge_bVALn(vpath_mask, i, 1))
- default_no_vpath++;
-
- if (default_no_vpath < driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = default_no_vpath;
-
- driver_config->g_no_cpus = driver_config->g_no_cpus -
- (driver_config->vpath_per_dev * 2);
- if (driver_config->g_no_cpus <= 0)
- driver_config->g_no_cpus = -1;
- }
-
- if (driver_config->vpath_per_dev == 1) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: Disable tx and rx steering, "
- "as single vpath is configured", VXGE_DRIVER_NAME);
- config_param->rth_steering = NO_STEERING;
- config_param->tx_steering_type = NO_STEERING;
- device_config->rth_en = 0;
- }
-
- /* configure bandwidth */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- device_config->vp_config[i].min_bandwidth = bw_percentage[i];
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
- device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
- if (no_of_vpaths < driver_config->vpath_per_dev) {
- if (!vxge_bVALn(vpath_mask, i, 1)) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not available",
- VXGE_DRIVER_NAME, i);
- continue;
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d available",
- VXGE_DRIVER_NAME, i);
- no_of_vpaths++;
- }
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not configured, "
- "max_config_vpath exceeded",
- VXGE_DRIVER_NAME, i);
- break;
- }
-
- /* Configure Tx fifo's */
- device_config->vp_config[i].fifo.enable =
- VXGE_HW_FIFO_ENABLE;
- device_config->vp_config[i].fifo.max_frags =
- MAX_SKB_FRAGS + 1;
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
-
- txdl_size = device_config->vp_config[i].fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd);
- txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
-
- /* Configure tti properties */
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].tti.btimer_val =
- (VXGE_TTI_BTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- /* For msi-x with napi (each vector has a handler of its own) -
- * Set CI to OFF for all vpaths
- */
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
-
- device_config->vp_config[i].tti.ltimer_val =
- (VXGE_TTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.rtimer_val =
- (VXGE_TTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
- device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
- device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
- device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
- device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
- device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
- device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
-
- /* Configure Rx rings */
- device_config->vp_config[i].ring.enable =
- VXGE_HW_RING_ENABLE;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_1;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_A;
-
- /* Configure rti properties */
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].rti.btimer_val =
- (VXGE_RTI_BTIMER_VAL * 1000)/272;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
-
- device_config->vp_config[i].rti.urange_a =
- RTI_RX_URANGE_A;
- device_config->vp_config[i].rti.urange_b =
- RTI_RX_URANGE_B;
- device_config->vp_config[i].rti.urange_c =
- RTI_RX_URANGE_C;
- device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
- device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
- device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
- device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
-
- device_config->vp_config[i].rti.rtimer_val =
- (VXGE_RTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rti.ltimer_val =
- (VXGE_RTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- vlan_tag_strip;
- }
-
- driver_config->vpath_per_dev = temp;
- return no_of_vpaths;
-}
-
-/* initialize device configurations */
-static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
- int *intr_type)
-{
- /* Used for CQRQ/SRQ. */
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
-
- device_config->dma_blockpool_max =
- VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
-
- if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
- max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
-
- if (!IS_ENABLED(CONFIG_PCI_MSI)) {
- vxge_debug_init(VXGE_ERR,
- "%s: This Kernel does not support "
- "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
- *intr_type = INTA;
- }
-
-	/* Configure whether MSI-X or INTA (IRQ line). */
- switch (*intr_type) {
- case INTA:
- device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
- break;
-
- case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
- break;
- }
-
- /* Timer period between device poll */
- device_config->device_poll_millis = VXGE_TIMER_DELAY;
-
- /* Configure mac based steering. */
- device_config->rts_mac_en = addr_learn_en;
-
- /* Configure Vpaths */
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
-
- vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
- __func__);
- vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
- device_config->intr_mode);
- vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
- device_config->device_poll_millis);
- vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
- device_config->rth_en);
- vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
- device_config->rth_it_type);
-}
-
-static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
-{
- int i;
-
- vxge_debug_init(VXGE_TRACE,
- "%s: %d Vpath(s) opened",
- vdev->ndev->name, vdev->no_of_vpath);
-
- switch (vdev->config.intr_type) {
- case INTA:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type INTA", vdev->ndev->name);
- break;
-
- case MSI_X:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type MSI-X", vdev->ndev->name);
- break;
- }
-
- if (vdev->config.rth_steering) {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering enabled for TCP_IPV4",
- vdev->ndev->name);
- } else {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering disabled", vdev->ndev->name);
- }
-
- switch (vdev->config.tx_steering_type) {
- case NO_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- break;
- case TX_PRIORITY_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_VLAN_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_MULTIQ_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx multiqueue steering enabled",
- vdev->ndev->name);
- break;
- case TX_PORT_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx port steering enabled",
- vdev->ndev->name);
- break;
- default:
- vxge_debug_init(VXGE_ERR,
- "%s: Unsupported tx steering type",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- }
-
- if (vdev->config.addr_learn_en)
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC Address learning enabled", vdev->ndev->name);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
-		vxge_debug_ll_config(VXGE_TRACE,
-			"%s: MTU size - %d", vdev->ndev->name,
-			vdev->devh->config.vp_config[i].mtu);
-		vxge_debug_init(VXGE_TRACE,
-			"%s: VLAN tag stripping %s", vdev->ndev->name,
-			vdev->devh->config.vp_config[i].rpa_strip_vlan_tag
-			? "Enabled" : "Disabled");
-		vxge_debug_ll_config(VXGE_TRACE,
-			"%s: Max frags : %d", vdev->ndev->name,
-			vdev->devh->config.vp_config[i].fifo.max_frags);
- break;
- }
-}
-
-/**
- * vxge_pm_suspend - vxge power management suspend entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
-{
- return -ENOSYS;
-}
-
-/**
- * vxge_pm_resume - vxge power management resume entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_resume(struct device *dev_d)
-{
- return -ENOSYS;
-}
-
-/**
- * vxge_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev)) {
- /* Bring down the card, while avoiding PCI I/O */
- do_vxge_close(netdev, 0);
- }
-
- pci_disable_device(pdev);
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * vxge_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has experienced a hard reset,
- * followed by fixups by BIOS, and has its config space
- * set up identically to what it was at cold boot.
- */
-static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- struct vxgedev *vdev = netdev_priv(netdev);
-
- if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- pci_set_master(pdev);
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * vxge_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells
- * us that it's OK to resume normal operation.
- */
-static void vxge_io_resume(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- if (netif_running(netdev)) {
- if (vxge_open(netdev)) {
- netdev_err(netdev,
- "Can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-}
-
-static inline u32 vxge_get_num_vfs(u64 function_mode)
-{
- u32 num_functions = 0;
-
- switch (function_mode) {
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- case VXGE_HW_FUNCTION_MODE_SRIOV_8:
- num_functions = 8;
- break;
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- num_functions = 1;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
- num_functions = 17;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV_4:
- num_functions = 4;
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
- num_functions = 2;
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV_8:
- num_functions = 8; /* TODO */
- break;
- }
- return num_functions;
-}
-
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
-{
- struct __vxge_hw_device *hldev = vdev->devh;
- u32 maj, min, bld, cmaj, cmin, cbld;
- enum vxge_hw_status status;
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
- if (ret) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
- VXGE_DRIVER_NAME, fw_name);
- goto out;
- }
-
- /* Load the new firmware onto the adapter */
- status = vxge_update_fw_image(hldev, fw->data, fw->size);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: FW image download to adapter failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- /* Read the version of the new firmware */
- status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: Upgrade read version failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- cmaj = vdev->config.device_hw_info.fw_version.major;
- cmin = vdev->config.device_hw_info.fw_version.minor;
- cbld = vdev->config.device_hw_info.fw_version.build;
- /* It's possible the version in /lib/firmware is not the latest version.
- * If so, we could get into a loop of trying to upgrade to the latest
- * and flashing the older version.
- */
- if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
- !override) {
- ret = -EINVAL;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
- maj, min, bld);
-
- /* Flash the adapter with the new firmware */
- status = vxge_hw_flash_fw(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
- "hard reset before using, thus requiring a system reboot or a "
- "hotplug event.\n");
-
-out:
- release_firmware(fw);
- return ret;
-}
-
-static int vxge_probe_fw_update(struct vxgedev *vdev)
-{
- u32 maj, min, bld;
- int ret, gpxe = 0;
- char *fw_name;
-
- maj = vdev->config.device_hw_info.fw_version.major;
- min = vdev->config.device_hw_info.fw_version.minor;
- bld = vdev->config.device_hw_info.fw_version.build;
-
- if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
- return 0;
-
- /* Ignore the build number when determining if the current firmware is
- * "too new" to load the driver
- */
- if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
- "version, unable to load driver\n",
- VXGE_DRIVER_NAME);
- return -EINVAL;
- }
-
- /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
- * work with this driver.
- */
- if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
- "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
-	/* No file specified; check EPROM images to decide gPXE or not */
- if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
- int i;
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
- if (vdev->devh->eprom_versions[i]) {
- gpxe = 1;
- break;
- }
- }
- if (gpxe)
- fw_name = "vxge/X3fw-pxe.ncf";
- else
- fw_name = "vxge/X3fw.ncf";
-
- ret = vxge_fw_upgrade(vdev, fw_name, 0);
- /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
- * probe, so ignore them
- */
- if (ret != -EINVAL && ret != -ENOENT)
- return -EIO;
- else
- ret = 0;
-
- if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
- VXGE_FW_VER(maj, min, 0)) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.",
- VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int is_sriov_initialized(struct pci_dev *pdev)
-{
- int pos;
- u16 ctrl;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
- if (ctrl & PCI_SRIOV_CTRL_VFE)
- return 1;
- }
- return 0;
-}
-
-static const struct vxge_hw_uld_cbs vxge_callbacks = {
- .link_up = vxge_callback_link_up,
- .link_down = vxge_callback_link_down,
- .crit_err = vxge_callback_crit_err,
-};
-
-/**
- * vxge_probe
- * @pdev : structure containing the PCI related information of the device.
- * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
- * Description:
- * This function is called when a new PCI device gets detected and initializes
- * it.
- * Return value:
- * returns 0 on success and negative on failure.
- *
- */
-static int
-vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
-{
- struct __vxge_hw_device *hldev;
- enum vxge_hw_status status;
- int ret;
- u64 vpath_mask = 0;
- struct vxgedev *vdev;
- struct vxge_config *ll_config = NULL;
- struct vxge_hw_device_config *device_config = NULL;
- struct vxge_hw_device_attr attr;
- int i, j, no_of_vpath = 0, max_vpath_supported = 0;
- u8 *macaddr;
- struct vxge_mac_addrs *entry;
- static int bus = -1, device = -1;
- u32 host_type;
- u8 new_device = 0;
- enum vxge_hw_status is_privileged;
- u32 function_mode;
- u32 num_vfs = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- attr.pdev = pdev;
-
- /* In SRIOV-17 mode, functions of the same adapter
- * can be deployed on different buses
- */
- if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
- !pdev->is_virtfn)
- new_device = 1;
-
- bus = pdev->bus->number;
- device = PCI_SLOT(pdev->devfn);
-
- if (new_device) {
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt !=
- driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME,
- driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
- driver_config->config_dev_cnt = 0;
- driver_config->total_dev_cnt = 0;
- }
-
-	/* Make the CPU-based number-of-vpaths calculation applicable
-	 * to individual functions as well.
-	 */
- driver_config->g_no_cpus = 0;
- driver_config->vpath_per_dev = max_config_vpath;
-
- driver_config->total_dev_cnt++;
- if (++driver_config->config_dev_cnt > max_config_dev) {
- ret = 0;
- goto _exit0;
- }
-
- device_config = kzalloc(sizeof(struct vxge_hw_device_config),
- GFP_KERNEL);
- if (!device_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
- "device_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
-
- ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
- if (!ll_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
- "device_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
- ll_config->tx_steering_type = TX_MULTIQ_STEERING;
- ll_config->intr_type = MSI_X;
- ll_config->napi_weight = NAPI_POLL_WEIGHT;
- ll_config->rth_steering = RTH_STEERING;
-
- /* get the default configuration parameters */
- vxge_hw_device_config_default_get(device_config);
-
- /* initialize configuration parameters */
- vxge_device_config_init(device_config, &ll_config->intr_type);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : can not enable PCI device", __func__);
- goto _exit0;
- }
-
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s : using 64bit DMA", __func__);
- } else {
- ret = -ENOMEM;
- goto _exit1;
- }
-
- ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : request regions failed", __func__);
- goto _exit1;
- }
-
- pci_set_master(pdev);
-
- attr.bar0 = pci_ioremap_bar(pdev, 0);
- if (!attr.bar0) {
- vxge_debug_init(VXGE_ERR,
- "%s : cannot remap io memory bar0", __func__);
- ret = -ENODEV;
- goto _exit2;
- }
- vxge_debug_ll_config(VXGE_TRACE,
- "pci ioremap bar0: %p:0x%llx",
- attr.bar0,
- (unsigned long long)pci_resource_start(pdev, 0));
-
- status = vxge_hw_device_hw_info_get(attr.bar0,
- &ll_config->device_hw_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
-			"%s: Reading of hardware info failed. "
-			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vpath_mask = ll_config->device_hw_info.vpath_mask;
- if (vpath_mask == 0) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: No vpaths available in device", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vxge_debug_ll_config(VXGE_TRACE,
- "%s:%d Vpath mask = %llx", __func__, __LINE__,
- (unsigned long long)vpath_mask);
-
- function_mode = ll_config->device_hw_info.function_mode;
- host_type = ll_config->device_hw_info.host_type;
- is_privileged = __vxge_hw_device_is_privilaged(host_type,
- ll_config->device_hw_info.func_id);
-
- /* Check how many vpaths are available */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- max_vpath_supported++;
- }
-
- if (new_device)
- num_vfs = vxge_get_num_vfs(function_mode) - 1;
-
- /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
- (ll_config->intr_type != INTA)) {
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed in enabling SRIOV mode: %d\n", ret);
- /* No need to fail out, as an error here is non-fatal */
- }
-
- /*
- * Configure vpaths and get driver configured number of vpaths
- * which is less than or equal to the maximum vpaths per function.
- */
- no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
- if (!no_of_vpath) {
- vxge_debug_ll_config(VXGE_ERR,
- "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
- ret = 0;
- goto _exit3;
- }
-
- /* Setting driver callbacks */
- attr.uld_callbacks = &vxge_callbacks;
-
- status = vxge_hw_device_initialize(&hldev, &attr, device_config);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
- }
-
- if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
- ll_config->device_hw_info.fw_version.minor,
- ll_config->device_hw_info.fw_version.build) >=
- VXGE_EPROM_FW_VER) {
- struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
-
- status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
- VXGE_DRIVER_NAME);
- /* This is a non-fatal error, continue */
- }
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- hldev->eprom_versions[i] = img[i].version;
- if (!img[i].is_valid)
- break;
- vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
- "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
- VXGE_EPROM_IMG_MAJOR(img[i].version),
- VXGE_EPROM_IMG_MINOR(img[i].version),
- VXGE_EPROM_IMG_FIX(img[i].version),
- VXGE_EPROM_IMG_BUILD(img[i].version));
- }
- }
-
-	/* If FCS stripping is not disabled in the MAC, fail the driver load */
- status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
- if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in the "
-			"MAC, failing driver load", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit4;
- }
-
- /* Always enable HWTS. This will always cause the FCS to be invalid,
- * due to the fact that HWTS is using the FCS as the location of the
- * timestamp. The HW FCS checking will still correctly determine if
- * there is a valid checksum, and the FCS is being removed by the driver
- * anyway. So no functionality is being lost. Since it is always
- * enabled, we now simply use the ioctl call to set whether or not the
- * driver should be paying attention to the HWTS.
- */
- if (is_privileged == VXGE_HW_OK) {
- status = vxge_timestamp_config(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
- VXGE_DRIVER_NAME);
- ret = -EFAULT;
- goto _exit4;
- }
- }
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
-
- /* set private device info */
- pci_set_drvdata(pdev, hldev);
-
- ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
- ll_config->addr_learn_en = addr_learn_en;
- ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = 1;
- ll_config->rth_hash_type_ipv4 = 0;
- ll_config->rth_hash_type_tcpipv6 = 0;
- ll_config->rth_hash_type_ipv6 = 0;
- ll_config->rth_hash_type_tcpipv6ex = 0;
- ll_config->rth_hash_type_ipv6ex = 0;
- ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
- ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-
- ret = vxge_device_register(hldev, ll_config, no_of_vpath, &vdev);
- if (ret) {
- ret = -EINVAL;
- goto _exit4;
- }
-
- ret = vxge_probe_fw_update(vdev);
- if (ret)
- goto _exit5;
-
- vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- /* set private HW device info */
- vdev->mtu = VXGE_HW_DEFAULT_MTU;
- vdev->bar0 = attr.bar0;
- vdev->max_vpath_supported = max_vpath_supported;
- vdev->no_of_vpath = no_of_vpath;
-
- /* Virtual Path count */
- for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- if (j >= vdev->no_of_vpath)
- break;
-
- vdev->vpaths[j].is_configured = 1;
- vdev->vpaths[j].device_id = i;
- vdev->vpaths[j].ring.driver_id = j;
- vdev->vpaths[j].vdev = vdev;
- vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
- memcpy((u8 *)vdev->vpaths[j].macaddr,
- ll_config->device_hw_info.mac_addrs[i],
- ETH_ALEN);
-
- /* Initialize the mac address list header */
- INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
-
- vdev->vpaths[j].mac_addr_cnt = 0;
- vdev->vpaths[j].mcast_addr_cnt = 0;
- j++;
- }
- vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
- vdev->max_config_port = max_config_port;
-
- vdev->vlan_tag_strip = vlan_tag_strip;
-
- /* map the hashing selector table to the configured vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpath_selector[i] = vpath_selector[i];
-
- macaddr = (u8 *)vdev->vpaths[0].macaddr;
-
- ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
-
- vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.serial_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.part_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
- vdev->ndev->name, ll_config->device_hw_info.product_desc);
-
- vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
- vdev->ndev->name, macaddr);
-
- vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
- vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
-
- vxge_debug_init(VXGE_TRACE,
- "%s: Firmware version : %s Date : %s", vdev->ndev->name,
- ll_config->device_hw_info.fw_version.version,
- ll_config->device_hw_info.fw_date.date);
-
- if (new_device) {
- switch (ll_config->device_hw_info.function_mode) {
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
- break;
- }
- }
-
- vxge_print_parm(vdev, vpath_mask);
-
-	/* Store the fw version for the ethtool option */
- strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
-
- /* Copy the station mac address to the list */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
- if (NULL == entry) {
- vxge_debug_init(VXGE_ERR,
- "%s: mac_addr_list : memory allocation failed",
- vdev->ndev->name);
- ret = -EPERM;
- goto _exit6;
- }
- macaddr = (u8 *)&entry->macaddr;
- memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
- list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
- vdev->vpaths[i].mac_addr_cnt = 1;
- }
-
- kfree(device_config);
-
- /*
- * INTA is shared in multi-function mode. This is unlike the INTA
- * implementation in MR mode, where each VH has its own INTA message.
- * - INTA is masked (disabled) as long as at least one function sets
- * its TITAN_MASK_ALL_INT.ALARM bit.
- * - INTA is unmasked (enabled) when all enabled functions have cleared
- * their own TITAN_MASK_ALL_INT.ALARM bit.
- * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
- * Though this driver leaves the top level interrupts unmasked while
- * leaving the required module interrupt bits masked on exit, there
-	 * could be a rogue driver around that does not follow this procedure
- * resulting in a failure to generate interrupts. The following code is
- * present to prevent such a failure.
- */
-
- if (ll_config->device_hw_info.function_mode ==
- VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
- if (vdev->config.intr_type == INTA)
- vxge_hw_device_unmask_all(hldev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- kfree(ll_config);
- return 0;
-
-_exit6:
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-_exit5:
- vxge_device_unregister(hldev);
-_exit4:
- vxge_hw_device_terminate(hldev);
- pci_disable_sriov(pdev);
-_exit3:
- iounmap(attr.bar0);
-_exit2:
- pci_release_region(pdev, 0);
-_exit1:
- pci_disable_device(pdev);
-_exit0:
- kfree(ll_config);
- kfree(device_config);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
- return ret;
-}
-
-/**
- * vxge_remove - Free the PCI device
- * @pdev: structure containing the PCI related information of the device.
- * Description: This function is called by the PCI subsystem to release a
- * PCI device and free up all resources held by the device.
- */
-static void vxge_remove(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev;
- struct vxgedev *vdev;
- int i;
-
- hldev = pci_get_drvdata(pdev);
- if (hldev == NULL)
- return;
-
- vdev = netdev_priv(hldev->ndev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
- vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
- __func__);
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-
- vxge_device_unregister(hldev);
- /* Do not call pci_disable_sriov here, as it will break child devices */
- vxge_hw_device_terminate(hldev);
- iounmap(vdev->bar0);
- pci_release_region(pdev, 0);
- pci_disable_device(pdev);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
-
- vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
- __func__, __LINE__);
- vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
- __LINE__);
-}
-
-static const struct pci_error_handlers vxge_err_handler = {
- .error_detected = vxge_io_error_detected,
- .slot_reset = vxge_io_slot_reset,
- .resume = vxge_io_resume,
-};
-
-static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume);
-
-static struct pci_driver vxge_driver = {
- .name = VXGE_DRIVER_NAME,
- .id_table = vxge_id_table,
- .probe = vxge_probe,
- .remove = vxge_remove,
- .driver.pm = &vxge_pm_ops,
- .err_handler = &vxge_err_handler,
-};
-
-static int __init
-vxge_starter(void)
-{
- int ret = 0;
-
- pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
- pr_info("Driver version: %s\n", DRV_VERSION);
-
- verify_bandwidth();
-
- driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
- if (!driver_config)
- return -ENOMEM;
-
- ret = pci_register_driver(&vxge_driver);
- if (ret) {
- kfree(driver_config);
- goto err;
- }
-
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
-err:
- return ret;
-}
-
-static void __exit
-vxge_closer(void)
-{
- pci_unregister_driver(&vxge_driver);
- kfree(driver_config);
-}
-module_init(vxge_starter);
-module_exit(vxge_closer);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
deleted file mode 100644
index da9d2c191828..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ /dev/null
@@ -1,516 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_MAIN_H
-#define VXGE_MAIN_H
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-version.h"
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-
-#define VXGE_DRIVER_NAME "vxge"
-#define VXGE_DRIVER_VENDOR "Neterion, Inc"
-#define VXGE_DRIVER_FW_VERSION_MAJOR 1
-
-#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
- VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
- VXGE_VERSION_FOR
-
-#define PCI_DEVICE_ID_TITAN_WIN 0x5733
-#define PCI_DEVICE_ID_TITAN_UNI 0x5833
-#define VXGE_HW_TITAN1_PCI_REVISION 1
-#define VXGE_HW_TITAN1A_PCI_REVISION 2
-
-#define VXGE_USE_DEFAULT 0xffffffff
-#define VXGE_HW_VPATH_MSIX_ACTIVE 4
-#define VXGE_ALARM_MSIX_ID 2
-#define VXGE_HW_RXSYNC_FREQ_CNT 4
-#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
-#define VXGE_LL_RX_COPY_THRESHOLD 256
-#define VXGE_DEF_FIFO_LENGTH 84
-
-#define NO_STEERING 0
-#define PORT_STEERING 0x1
-#define RTH_STEERING 0x2
-#define RX_TOS_STEERING 0x3
-#define RX_VLAN_STEERING 0x4
-#define RTH_BUCKET_SIZE 4
-
-#define TX_PRIORITY_STEERING 1
-#define TX_VLAN_STEERING 2
-#define TX_PORT_STEERING 3
-#define TX_MULTIQ_STEERING 4
-
-#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
-
-#define VXGE_TTI_BTIMER_VAL 250000
-
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_T1A_TTI_LTIMER_VAL 80
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_TTI_RTIMER_ADAPT_VAL 10
-#define VXGE_T1A_TTI_RTIMER_VAL 400
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_RTI_RTIMER_ADAPT_VAL 15
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
-#define VXGE_ISR_POLLING_CNT 8
-#define VXGE_MAX_CONFIG_DEV 0xFF
-#define VXGE_EXEC_MODE_DISABLE 0
-#define VXGE_EXEC_MODE_ENABLE 1
-#define VXGE_MAX_CONFIG_PORT 1
-#define VXGE_ALL_VID_DISABLE 0
-#define VXGE_ALL_VID_ENABLE 1
-#define VXGE_PAUSE_CTRL_DISABLE 0
-#define VXGE_PAUSE_CTRL_ENABLE 1
-
-#define TTI_TX_URANGE_A 5
-#define TTI_TX_URANGE_B 15
-#define TTI_TX_URANGE_C 40
-#define TTI_TX_UFC_A 5
-#define TTI_TX_UFC_B 40
-#define TTI_TX_UFC_C 60
-#define TTI_TX_UFC_D 100
-#define TTI_T1A_TX_UFC_A 30
-#define TTI_T1A_TX_UFC_B 80
-/* Slope = (max_mtu - min_mtu) / (ufc_at_min_mtu - ufc_at_max_mtu) ~= 93 */
-/* UFC is 60 at 9k MTU and 140 at 1.5k MTU */
-#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
-
-/* Slope ~= 37; UFC is 100 at 9k MTU and 300 at 1.5k MTU */
-#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
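-
-/*
- * Worked example of the two slope formulas above (a sketch, using the 9k
- * max MTU the comments assume): at mtu = 1500,
- *	TTI_T1A_TX_UFC_C(1500) = 60 + (9000 - 1500) / 93 = 140
- *	TTI_T1A_TX_UFC_D(1500) = 100 + (9000 - 1500) / 37 = 302
- * so smaller MTUs allow more utilization-based frames per interrupt.
- */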
-
-
-#define RTI_RX_URANGE_A 5
-#define RTI_RX_URANGE_B 15
-#define RTI_RX_URANGE_C 40
-#define RTI_T1A_RX_URANGE_A 1
-#define RTI_T1A_RX_URANGE_B 20
-#define RTI_T1A_RX_URANGE_C 50
-#define RTI_RX_UFC_A 1
-#define RTI_RX_UFC_B 5
-#define RTI_RX_UFC_C 10
-#define RTI_RX_UFC_D 15
-#define RTI_T1A_RX_UFC_B 20
-#define RTI_T1A_RX_UFC_C 50
-#define RTI_T1A_RX_UFC_D 60
-
-/*
- * With these moderation parameters the interrupt rate is held at about
- * 3k per second for most, but not all, traffic patterns. The values below
- * are the maximum interrupt counts allowed per function with INTA, or per
- * vector with MSI-X, in a 10 millisecond time period. Enabled only for
- * Titan 1A.
- */
-#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
-#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
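-
-/*
- * A minimal sketch (hypothetical, not this driver's exact algorithm) of
- * how a per-fifo interrupt_count/jiffies pair can enforce the Tx budget
- * above: count interrupts inside a 10 ms jiffies window and report when
- * the window's budget is exceeded so the caller can back off the timer.
- */
-static inline bool example_tx_over_budget(unsigned long *count,
-					  unsigned long *window_start)
-{
-	if (time_after(jiffies, *window_start + HZ / 100)) {
-		*window_start = jiffies;	/* start a new 10 ms window */
-		*count = 0;
-	}
-	return ++(*count) > VXGE_T1A_MAX_TX_INTERRUPT_COUNT;
-}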
-
-/* Timer period in milliseconds */
-#define VXGE_TIMER_DELAY 10000
-
-#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
-
-#define is_sriov(function_mode) \
- ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
-
-enum vxge_reset_event {
- /* reset events */
- VXGE_LL_VPATH_RESET = 0,
- VXGE_LL_DEVICE_RESET = 1,
- VXGE_LL_FULL_RESET = 2,
- VXGE_LL_START_RESET = 3,
- VXGE_LL_COMPL_RESET = 4
-};
-/* These flags represent the device's transient state */
-enum vxge_device_state_t {
-	__VXGE_STATE_RESET_CARD = 0,
-	__VXGE_STATE_CARD_UP
-};
-
-enum vxge_mac_addr_state {
- /* mac address states */
- VXGE_LL_MAC_ADDR_IN_LIST = 0,
- VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
-};
-
-struct vxge_drv_config {
- int config_dev_cnt;
- int total_dev_cnt;
- int g_no_cpus;
- unsigned int vpath_per_dev;
-};
-
-struct macInfo {
- unsigned char macaddr[ETH_ALEN];
- unsigned char macmask[ETH_ALEN];
- unsigned int vpath_no;
- enum vxge_mac_addr_state state;
-};
-
-struct vxge_config {
- int tx_pause_enable;
- int rx_pause_enable;
- int napi_weight;
- int intr_type;
-#define INTA 0
-#define MSI 1
-#define MSI_X 2
-
- int addr_learn_en;
-
- u32 rth_steering:2,
- rth_algorithm:2,
- rth_hash_type_tcpipv4:1,
- rth_hash_type_ipv4:1,
- rth_hash_type_tcpipv6:1,
- rth_hash_type_ipv6:1,
- rth_hash_type_tcpipv6ex:1,
- rth_hash_type_ipv6ex:1,
- rth_bkt_sz:8;
- int rth_jhash_golden_ratio;
- int tx_steering_type;
- int fifo_indicate_max_pkts;
- struct vxge_hw_device_hw_info device_hw_info;
-};
-
-struct vxge_msix_entry {
-	/* Mimicking the kernel's struct msix_entry. */
- u16 vector;
- u16 entry;
- u16 in_use;
- void *arg;
-};
-
-/* Software Statistics */
-
-struct vxge_sw_stats {
-
- /* Virtual Path */
- unsigned long vpaths_open;
- unsigned long vpath_open_fail;
-
- /* Misc. */
- unsigned long link_up;
- unsigned long link_down;
-};
-
-struct vxge_mac_addrs {
- struct list_head item;
- u64 macaddr;
- u64 macmask;
- enum vxge_mac_addr_state state;
-};
-
-struct vxgedev;
-
-struct vxge_fifo_stats {
- struct u64_stats_sync syncp;
- u64 tx_frms;
- u64 tx_bytes;
-
- unsigned long tx_errors;
- unsigned long txd_not_free;
- unsigned long txd_out_of_desc;
- unsigned long pci_map_fail;
-};
-
-struct vxge_fifo {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_fifo *handle;
- struct netdev_queue *txq;
-
- int tx_steering_type;
- int indicate_max_pkts;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- u32 tx_vector_no;
- /* Tx stats */
- struct vxge_fifo_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_ring_stats {
- struct u64_stats_sync syncp;
- u64 rx_frms;
- u64 rx_mcast;
- u64 rx_bytes;
-
- unsigned long rx_errors;
- unsigned long rx_dropped;
- unsigned long prev_rx_frms;
- unsigned long pci_map_fail;
- unsigned long skb_alloc_fail;
-};
-
-struct vxge_ring {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_ring *handle;
- /* The vpath id maintained in the driver -
- * 0 to 'maximum_vpaths_in_function - 1'
- */
- int driver_id;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- /* copy of the flag indicating whether rx_hwts is to be used */
- u32 rx_hwts:1;
-
- int pkts_processed;
- int budget;
-
- struct napi_struct napi;
- struct napi_struct *napi_p;
-
-#define VXGE_MAX_MAC_ADDR_COUNT 30
-
- int vlan_tag_strip;
- u32 rx_vector_no;
- enum vxge_hw_status last_status;
-
- /* Rx stats */
- struct vxge_ring_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_vpath {
- struct vxge_fifo fifo;
- struct vxge_ring ring;
-
- struct __vxge_hw_vpath_handle *handle;
-
- /* Actual vpath id for this vpath in the device - 0 to 16 */
- int device_id;
- int max_mac_addr_cnt;
- int is_configured;
- int is_open;
- struct vxgedev *vdev;
- u8 macaddr[ETH_ALEN];
- u8 macmask[ETH_ALEN];
-
-#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
- /* mac addresses currently programmed into NIC */
- u16 mac_addr_cnt;
- u16 mcast_addr_cnt;
- struct list_head mac_addr_list;
-
- u32 level_err;
- u32 level_trace;
-};
-#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \
- for (i = 0; i < vdev->no_of_vpath; i++) { \
- vdev->vpaths[i].level_err = err; \
- vdev->vpaths[i].level_trace = trace; \
- } \
- vdev->level_err = err; \
- vdev->level_trace = trace; \
-}
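-
-/*
- * Note: VXGE_COPY_DEBUG_INFO_TO_LL expands a bare for-loop and relies on a
- * counter named 'i' already being declared at the call site, e.g.
- * (mirroring its use in vxge_probe()):
- *
- *	int i;
- *	...
- *	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- *				   vxge_hw_device_trace_level_get(hldev));
- */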
-
-struct vxgedev {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_device *devh;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- int vlan_tag_strip;
- struct vxge_config config;
- unsigned long state;
-
- /* Indicates which vpath to reset */
- unsigned long vp_reset;
-
- /* Timer used for polling vpath resets */
- struct timer_list vp_reset_timer;
-
- /* Timer used for polling vpath lockup */
- struct timer_list vp_lockup_timer;
-
-	/*
-	 * Flag tracking whether the device is in all-multicast
-	 * or promiscuous mode.
-	 */
- u16 all_multi_flg;
-
- /* A flag indicating whether rx_hwts is to be used or not. */
- u32 rx_hwts:1,
- titan1:1;
-
- struct vxge_msix_entry *vxge_entries;
- struct msix_entry *entries;
-	/*
-	 * 4 MSI-X vectors for each of the 17 vpaths:
-	 * 4 * 17 = 68 in total.
-	 */
-#define VXGE_MAX_REQUESTED_MSIX 68
-#define VXGE_INTR_STRLEN 80
- char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
-
- enum vxge_hw_event cric_err_event;
-
- int max_vpath_supported;
- int no_of_vpath;
-
- struct napi_struct napi;
-	/* A debug option: when enabled and an error condition occurs,
-	 * the driver will:
-	 * - mask all interrupts
-	 * - not clear the source of the alarm
-	 * - gracefully stop all I/O
-	 * A diagnostic dump of registers and stats at this point
-	 * reveals very useful information.
-	 */
- int exec_mode;
- int max_config_port;
- struct vxge_vpath *vpaths;
-
- struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
- void __iomem *bar0;
- struct vxge_sw_stats stats;
- int mtu;
-	/* The fields below are used to select the vpath for transmitting a packet */
- u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpaths_deployed;
-
- u32 intr_cnt;
- u32 level_err;
- u32 level_trace;
- char fw_version[VXGE_HW_FW_STRLEN];
- struct work_struct reset_task;
-};
-
-struct vxge_rx_priv {
- struct sk_buff *skb;
- unsigned char *skb_data;
- dma_addr_t data_dma;
- dma_addr_t data_size;
-};
-
-struct vxge_tx_priv {
- struct sk_buff *skb;
- dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
-};
-
-#define VXGE_MODULE_PARAM_INT(p, val) \
- static int p = val; \
- module_param(p, int, 0)
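-
-/*
- * A usage sketch:
- *
- *	VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
- *
- * expands to 'static int max_config_port = VXGE_MAX_CONFIG_PORT;' plus a
- * module_param() registration with permissions 0 (not visible in sysfs).
- */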
-
-static inline
-void vxge_os_timer(struct timer_list *timer, void (*func)(struct timer_list *),
- unsigned long timeout)
-{
- timer_setup(timer, func, 0);
- mod_timer(timer, jiffies + timeout);
-}
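-
-/*
- * vxge_os_timer() both initializes and arms the timer; the callback takes
- * the usual timer_list pointer. A hypothetical sketch:
- *
- *	static void example_poll(struct timer_list *t)
- *	{
- *		struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
- *		...
- *	}
- *
- *	vxge_os_timer(&vdev->vp_reset_timer, example_poll, HZ / 2);
- */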
-
-void vxge_initialize_ethtool_ops(struct net_device *ndev);
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-
-/* #define VXGE_DEBUG_INIT: debug for initialization functions
- * #define VXGE_DEBUG_TX : debug transmit related functions
- * #define VXGE_DEBUG_RX : debug receive related functions
- * #define VXGE_DEBUG_MEM : debug memory module
- * #define VXGE_DEBUG_LOCK: debug locks
- * #define VXGE_DEBUG_SEM : debug semaphore
- * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
- */
-#define VXGE_DEBUG_INIT 0x00000001
-#define VXGE_DEBUG_TX 0x00000002
-#define VXGE_DEBUG_RX 0x00000004
-#define VXGE_DEBUG_MEM 0x00000008
-#define VXGE_DEBUG_LOCK 0x00000010
-#define VXGE_DEBUG_SEM 0x00000020
-#define VXGE_DEBUG_ENTRYEXIT 0x00000040
-#define VXGE_DEBUG_INTR 0x00000080
-#define VXGE_DEBUG_LL_CONFIG 0x00000100
-
-/* Debug tracing for VXGE driver */
-#ifndef VXGE_DEBUG_MASK
-#define VXGE_DEBUG_MASK 0x0
-#endif
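-
-/*
- * VXGE_DEBUG_MASK is a build-time filter: each vxge_debug_*() macro below
- * compiles down to no_printk() unless its category bit is set in the mask.
- * For example, to trace initialization and transmit paths, build with:
- *
- *	#define VXGE_DEBUG_MASK (VXGE_DEBUG_INIT | VXGE_DEBUG_TX)
- */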
-
-#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
-#define vxge_debug_ll_config(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_ll_config(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
-#define vxge_debug_init(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_init(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
-#define vxge_debug_tx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_tx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
-#define vxge_debug_rx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_rx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
-#define vxge_debug_mem(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_mem(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
-#define vxge_debug_entryexit(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_entryexit(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
-#define vxge_debug_intr(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_intr(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
- vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
- level, mask);\
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
- vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
- vdev->devh), \
- vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
- vdev->devh));\
-}
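-
-/*
- * Like VXGE_COPY_DEBUG_INFO_TO_LL, this macro expands inline and therefore
- * also needs 'i' declared at the call site. 'level' and 'mask' are the
- * values forwarded to vxge_hw_device_debug_set(), e.g. VXGE_ERR and
- * VXGE_COMPONENT_LL.
- */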
-
-#ifdef NETIF_F_GSO
-#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
-#endif
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
deleted file mode 100644
index 3e658b175947..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-reg.h
+++ /dev/null
@@ -1,4636 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
- * Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_REG_H
-#define VXGE_REG_H
-
-/*
- * vxge_mBIT(loc) - set bit at offset
- */
-#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
-
-/*
- * vxge_vBIT(val, loc, sz) - set bits at offset
- */
-#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
-#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
-
-/*
- * vxge_bVALn(bits, loc, n) - Get the value of n bits at location
- */
-#define vxge_bVALn(bits, loc, n) \
- ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1))
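-
-/*
- * Note that the Titan register layout numbers bits MSB-first: bit 0 is the
- * most significant bit of the 64-bit word. Worked examples:
- *
- *	vxge_mBIT(3)				== 0x1000000000000000ULL
- *	vxge_vBIT(0x5, 0, 4)			== 0x5000000000000000ULL
- *	vxge_bVALn(0x5000000000000000ULL, 0, 4)	== 0x5
- */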
-
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
- vxge_bVALn(bits, 56, 8)
-
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
- vxge_bVALn(bits, 3, 5)
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
- vxge_bVALn(bits, 5, 3)
-#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
-
-#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
-#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
-
-#define VXGE_HW_FW_API_GET_EPROM_REV 31
-
-#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
-#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
-#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
-#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
-
-#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
-#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
-
-#define VXGE_HW_FW_API_GET_FUNC_MODE 29
-#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
-
-#define VXGE_HW_FW_UPGRADE_MEMO 13
-#define VXGE_HW_FW_UPGRADE_ACTION 16
-#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
-#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
-#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
-#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
-
-#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
-#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
-#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
-
-#define VXGE_HW_ASIC_MODE_RESERVED 0
-#define VXGE_HW_ASIC_MODE_NO_IOV 1
-#define VXGE_HW_ASIC_MODE_SR_IOV 2
-#define VXGE_HW_ASIC_MODE_MR_IOV 3
-
-#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
-#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
-
-#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
-
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
- vxge_bVALn(bits, 50, 14)
-
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
- vxge_bVALn(bits, 0, 17)
-
-#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
- vxge_bVALn(bits, 3, 5)
-
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
- vxge_bVALn(bits, 17, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
-
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
-
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
- vxge_bVALn(bits, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
- vxge_bVALn(bits, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
- vxge_bVALn(bits, 33, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
- vxge_vBIT(val, 49, 15)
-
-#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
-#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
-#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
-
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
-
-#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
- vxge_mBIT(54)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
- vxge_bVALn(bits, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
- vxge_vBIT(val, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
- vxge_bVALn(bits, 62, 2)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
- vxge_bVALn(bits, 7, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
- vxge_bVALn(bits, 8, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
- vxge_bVALn(bits, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
- vxge_bVALn(bits, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
- vxge_bVALn(bits, 15, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
- vxge_bVALn(bits, 19, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
- vxge_bVALn(bits, 23, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
- vxge_bVALn(bits, 27, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 31, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 35, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
- vxge_bVALn(bits, 39, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
- vxge_bVALn(bits, 43, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
- vxge_vBIT(val, 32, 32)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
- vxge_bVALn(bits, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
- vxge_bVALn(bits, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
- vxge_vBIT(val, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
- vxge_bVALn(bits, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
- vxge_bVALn(bits, 42, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
- vxge_vBIT(val, 42, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
- vxge_bVALn(bits, 0, 64)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
- vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
- vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
- vxge_bVALn(bits, 40, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
- vxge_vBIT(val, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
- vxge_bVALn(bits, 56, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 57, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
- vxge_vBIT(val, 57, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
- vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
- vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) vxge_vBIT(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
-
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
- vxge_bVALn(bits, 0, 18)
-
-#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\
-) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define \
-VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_CONFIG_PRIV_H
-
-#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
-#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
-
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-/*
- * The registers are memory mapped and use native big-endian byte order.
- * Little-endian hosts are handled by enabling hardware byte swapping for
- * register and DMA operations.
- */
-struct vxge_hw_legacy_reg {
-
- u8 unused00010[0x00010];
-
-/*0x00010*/ u64 toc_swapper_fb;
-#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00018*/ u64 pifm_rd_swap_en;
-#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00020*/ u64 pifm_rd_flip_en;
-#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00028*/ u64 pifm_wr_swap_en;
-#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00030*/ u64 pifm_wr_flip_en;
-#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00038*/ u64 toc_first_pointer;
-#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00040*/ u64 host_access_en;
-#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
-
-} __packed;
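-
-/*
- * A minimal sketch (assumptions: a little-endian host and 'legacy' being
- * an ioremapped struct vxge_hw_legacy_reg) of enabling the hardware byte
- * swapper described above:
- *
- *	writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- *	       &legacy->pifm_rd_swap_en);
- *	writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- *	       &legacy->pifm_wr_swap_en);
- *
- * After this, reading toc_swapper_fb should return
- * VXGE_HW_SWAPPER_INITIAL_VALUE.
- */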
-
-struct vxge_hw_toc_reg {
-
- u8 unused00050[0x00050];
-
-/*0x00050*/ u64 toc_common_pointer;
-#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 toc_memrepair_pointer;
-#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
-#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused001e0[0x001e0-0x000e8];
-
-/*0x001e0*/ u64 toc_mrpcim_pointer;
-#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x001e8*/ u64 toc_srpcim_pointer[17];
-#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00278[0x00278-0x00270];
-
-/*0x00278*/ u64 toc_vpmgmt_pointer[17];
-#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00390[0x00390-0x00300];
-
-/*0x00390*/ u64 toc_vpath_pointer[17];
-#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused004a0[0x004a0-0x00418];
-
-/*0x004a0*/ u64 toc_kdfc;
-#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004a8*/ u64 toc_usdc;
-#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004b0*/ u64 toc_kdfc_vpath_stride;
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-/*0x004b8*/ u64 toc_kdfc_fifo_stride;
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-
-} __packed;
-
-struct vxge_hw_common_reg {
-
- u8 unused00a00[0x00a00];
-
-/*0x00a00*/ u64 prc_status1;
-#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
-/*0x00a08*/ u64 rxdcm_reset_in_progress;
-#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a10*/ u64 replicq_flush_in_progress;
-#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
-#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
-#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a28*/ u64 noffload_reset_in_progress;
-#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rd_req_in_progress;
-#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
-/*0x00a38*/ u64 rd_req_outstanding;
-#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
-/*0x00a40*/ u64 kdfc_reset_in_progress;
-#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
- u8 unused00b00[0x00b00-0x00a48];
-
-/*0x00b00*/ u64 one_cfg_vp;
-#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
-/*0x00b08*/ u64 one_common;
-#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
- u8 unused00b80[0x00b80-0x00b10];
-
-/*0x00b80*/ u64 tim_int_en;
-#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
-/*0x00b88*/ u64 tim_set_int_en;
-#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b90*/ u64 tim_clr_int_en;
-#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b98*/ u64 tim_mask_int_during_reset;
-#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
-/*0x00ba0*/ u64 tim_reset_in_progress;
-#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
-/*0x00ba8*/ u64 tim_outstanding_bmap;
-#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
- u8 unused00c00[0x00c00-0x00bb0];
-
-/*0x00c00*/ u64 msg_reset_in_progress;
-#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
-/*0x00c08*/ u64 msg_mxp_mr_ready;
-#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
-/*0x00c10*/ u64 msg_uxp_mr_ready;
-#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
-/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
-#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
-/*0x00c20*/ u64 msg_umq_rtl_bwr;
-#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
- u8 unused00d00[0x00d00-0x00c28];
-
-/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
-#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
-/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
-#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
-/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
-#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
-/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
-#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
-/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
-#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
- u8 unused00d40[0x00d40-0x00d28];
-
-/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
-#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
-/*0x00d48*/ u64 stats_cfg0;
-#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
- u8 unused00da8[0x00da8-0x00d50];
-
-/*0x00da8*/ u64 clear_msix_mask_vect[4];
-#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00dc8*/ u64 set_msix_mask_vect[4];
-#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
-/*0x00de8*/ u64 clear_msix_mask_all_vect;
-#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df0*/ u64 set_msix_mask_all_vect;
-#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df8*/ u64 mask_vector[4];
-#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x00e18*/ u64 msix_pending_vector[4];
-#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
-#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e58*/ u64 titan_asic_id;
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
-/*0x00e60*/ u64 titan_general_int_status;
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
- vxge_vBIT(val, 3, 17)
- u8 unused00e70[0x00e70-0x00e68];
-
-/*0x00e70*/ u64 titan_mask_all_int;
-#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
-#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
- u8 unused00e80[0x00e80-0x00e78];
-
-/*0x00e80*/ u64 tim_int_status0;
-#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
-/*0x00e88*/ u64 tim_int_mask0;
-#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
-/*0x00e90*/ u64 tim_int_status1;
-#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
-/*0x00e98*/ u64 tim_int_mask1;
-#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
-/*0x00ea0*/ u64 rti_int_status;
-#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
-/*0x00ea8*/ u64 rti_int_mask;
-#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
-/*0x00eb0*/ u64 adapter_status;
-#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
-#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
-#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
-#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
-#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
-#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
-#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
-#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
-#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
-/*0x00eb8*/ u64 gen_ctrl;
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
-#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
-#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
-#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
- u8 unused00ed0[0x00ed0-0x00ec0];
-
-/*0x00ed0*/ u64 adapter_ready;
-#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
-/*0x00ed8*/ u64 outstanding_read;
-#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
-/*0x00ee0*/ u64 vpath_rst_in_prog;
-#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
-/*0x00ee8*/ u64 vpath_reg_modified;
-#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
- u8 unused00fc0[0x00fc0-0x00ef0];
-
-/*0x00fc0*/ u64 cp_reset_in_progress;
-#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
- u8 unused01080[0x01080-0x00fc8];
-
-/*0x01080*/ u64 xgmac_ready;
-#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused010c0[0x010c0-0x01088];
-
-/*0x010c0*/ u64 fbif_ready;
-#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused01100[0x01100-0x010c8];
-
-/*0x01100*/ u64 vplane_assignments;
-#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
-/*0x01108*/ u64 vpath_assignments;
-#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
-/*0x01110*/ u64 resource_assignments;
-#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01118*/ u64 host_type_assignments;
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 5, 3)
- u8 unused01128[0x01128-0x01120];
-
-/*0x01128*/ u64 max_resource_assignments;
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
- vxge_vBIT(val, 11, 5)
-/*0x01130*/ u64 pf_vpath_assignments;
-#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01200[0x01200-0x01138];
-
-/*0x01200*/ u64 rts_access_icmp;
-#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01208*/ u64 rts_access_tcpsyn;
-#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01210*/ u64 rts_access_zl4pyld;
-#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01218*/ u64 rts_access_l4prtcl_tcp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01220*/ u64 rts_access_l4prtcl_udp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01228*/ u64 rts_access_l4prtcl_flex;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01230*/ u64 rts_access_ipfrag;
-#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
-
-} __packed;
-
-struct vxge_hw_memrepair_reg {
- u64 unused1;
- u64 unused2;
-} __packed;
-
-struct vxge_hw_pcicfgmgmt_reg {
-
-/*0x00000*/ u64 resource_no;
-#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3)
-/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00020*/ u64 msixgrp_no;
-#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
-
-} __packed;
-
-struct vxge_hw_mrpcim_reg {
-/*0x00000*/ u64 g3fbct_int_status;
-#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x00008*/ u64 g3fbct_int_mask;
-/*0x00010*/ u64 g3fbct_err_reg;
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x00018*/ u64 g3fbct_err_mask;
-/*0x00020*/ u64 g3fbct_err_alarm;
-
- u8 unused00a00[0x00a00-0x00028];
-
-/*0x00a00*/ u64 wrdma_int_status;
-#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
-#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
-#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
-/*0x00a08*/ u64 wrdma_int_mask;
-/*0x00a10*/ u64 rc_alarm_reg;
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
-#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
-#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
-#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
-#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
-/*0x00a18*/ u64 rc_alarm_mask;
-/*0x00a20*/ u64 rc_alarm_alarm;
-/*0x00a28*/ u64 rxdrm_sm_err_reg;
-#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rxdrm_sm_err_mask;
-/*0x00a38*/ u64 rxdrm_sm_err_alarm;
-/*0x00a40*/ u64 rxdcm_sm_err_reg;
-#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a48*/ u64 rxdcm_sm_err_mask;
-/*0x00a50*/ u64 rxdcm_sm_err_alarm;
-/*0x00a58*/ u64 rxdwm_sm_err_reg;
-#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a60*/ u64 rxdwm_sm_err_mask;
-/*0x00a68*/ u64 rxdwm_sm_err_alarm;
-/*0x00a70*/ u64 rda_err_reg;
-#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
-#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
-#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
-#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
-#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
-#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
-/*0x00a78*/ u64 rda_err_mask;
-/*0x00a80*/ u64 rda_err_alarm;
-/*0x00a88*/ u64 rda_ecc_db_reg;
-#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00a90*/ u64 rda_ecc_db_mask;
-/*0x00a98*/ u64 rda_ecc_db_alarm;
-/*0x00aa0*/ u64 rda_ecc_sg_reg;
-#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00aa8*/ u64 rda_ecc_sg_mask;
-/*0x00ab0*/ u64 rda_ecc_sg_alarm;
-/*0x00ab8*/ u64 rqa_err_reg;
-#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
-/*0x00ac0*/ u64 rqa_err_mask;
-/*0x00ac8*/ u64 rqa_err_alarm;
-/*0x00ad0*/ u64 frf_alarm_reg;
-#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
-/*0x00ad8*/ u64 frf_alarm_mask;
-/*0x00ae0*/ u64 frf_alarm_alarm;
-/*0x00ae8*/ u64 rocrc_alarm_reg;
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
-/*0x00af0*/ u64 rocrc_alarm_mask;
-/*0x00af8*/ u64 rocrc_alarm_alarm;
-/*0x00b00*/ u64 wde0_alarm_reg;
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b08*/ u64 wde0_alarm_mask;
-/*0x00b10*/ u64 wde0_alarm_alarm;
-/*0x00b18*/ u64 wde1_alarm_reg;
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b20*/ u64 wde1_alarm_mask;
-/*0x00b28*/ u64 wde1_alarm_alarm;
-/*0x00b30*/ u64 wde2_alarm_reg;
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b38*/ u64 wde2_alarm_mask;
-/*0x00b40*/ u64 wde2_alarm_alarm;
-/*0x00b48*/ u64 wde3_alarm_reg;
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b50*/ u64 wde3_alarm_mask;
-/*0x00b58*/ u64 wde3_alarm_alarm;
-
- u8 unused00be8[0x00be8-0x00b60];
-
-/*0x00be8*/ u64 rx_w_round_robin_0;
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
-/*0x00bf0*/ u64 rx_w_round_robin_1;
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00bf8*/ u64 rx_w_round_robin_2;
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c00*/ u64 rx_w_round_robin_3;
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c08*/ u64 rx_w_round_robin_4;
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c10*/ u64 rx_w_round_robin_5;
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c18*/ u64 rx_w_round_robin_6;
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c20*/ u64 rx_w_round_robin_7;
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c28*/ u64 rx_w_round_robin_8;
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c30*/ u64 rx_w_round_robin_9;
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c38*/ u64 rx_w_round_robin_10;
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c40*/ u64 rx_w_round_robin_11;
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c48*/ u64 rx_w_round_robin_12;
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c50*/ u64 rx_w_round_robin_13;
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c58*/ u64 rx_w_round_robin_14;
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c60*/ u64 rx_w_round_robin_15;
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c68*/ u64 rx_w_round_robin_16;
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c70*/ u64 rx_w_round_robin_17;
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c78*/ u64 rx_w_round_robin_18;
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c80*/ u64 rx_w_round_robin_19;
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c88*/ u64 rx_w_round_robin_20;
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c90*/ u64 rx_w_round_robin_21;
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
- vxge_vBIT(val, 19, 5)
-
-#define VXGE_HW_WRR_RING_SERVICE_STATES 171
-#define VXGE_HW_WRR_RING_COUNT 22
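/* Arithmetic check on the two constants above: registers
 * rx_w_round_robin_0..21 provide 22 x 8 = 176 five-bit priority
 * slots, of which 171 (SS_0..SS_170) are populated; register 21
 * declares only its first three slots. */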
-
-/*0x00c98*/ u64 rx_queue_priority_0;
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-/*0x00ca0*/ u64 rx_queue_priority_1;
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
-/*0x00ca8*/ u64 rx_queue_priority_2;
-#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
- u8 unused00cc8[0x00cc8-0x00cb0];
-
-/*0x00cc8*/ u64 replication_queue_priority;
-#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00cd0*/ u64 rx_queue_select;
-#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
-/*0x00cd8*/ u64 rqa_vpbp_ctrl;
-#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
-#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
-#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
-/*0x00ce0*/ u64 rx_multi_cast_ctrl;
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
- vxge_vBIT(val, 2, 30)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
-/*0x00ce8*/ u64 wde_prm_ctrl;
-#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
-#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
-/*0x00cf0*/ u64 noa_ctrl;
-#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
-/*0x00cf8*/ u64 phase_cfg;
-#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
-#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
-#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
-#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
-#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
-#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
-#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
-#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
-#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
-/*0x00d00*/ u64 rcq_bypq_cfg;
-#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
- u8 unused00e00[0x00e00-0x00d08];
-
-/*0x00e00*/ u64 doorbell_int_status;
-#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
-#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
-/*0x00e08*/ u64 doorbell_int_mask;
-/*0x00e10*/ u64 kdfc_err_reg;
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
-/*0x00e18*/ u64 kdfc_err_mask;
-/*0x00e20*/ u64 kdfc_err_reg_alarm;
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
- u8 unused00e40[0x00e40-0x00e28];
-/*0x00e40*/ u64 kdfc_vp_partition_0;
-#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
-/*0x00e48*/ u64 kdfc_vp_partition_1;
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
-/*0x00e50*/ u64 kdfc_vp_partition_2;
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
-/*0x00e58*/ u64 kdfc_vp_partition_3;
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
-/*0x00e60*/ u64 kdfc_vp_partition_4;
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
-/*0x00e68*/ u64 kdfc_vp_partition_5;
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
-/*0x00e70*/ u64 kdfc_vp_partition_6;
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
-/*0x00e78*/ u64 kdfc_vp_partition_7;
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
-/*0x00e80*/ u64 kdfc_vp_partition_8;
-#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
-/*0x00e88*/ u64 kdfc_w_round_robin_0;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused0f28[0x0f28-0x0e90];
-
-/*0x00f28*/ u64 kdfc_w_round_robin_20;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
-#define VXGE_HW_WRR_FIFO_COUNT 20
-
- u8 unused0fc8[0x0fc8-0x0f30];
-
-/*0x00fc8*/ u64 kdfc_w_round_robin_40;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused1068[0x01068-0x0fd0];
-
-/*0x01068*/ u64 kdfc_entry_type_sel_0;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
-/*0x01070*/ u64 kdfc_entry_type_sel_1;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 kdfc_fifo_0_ctrl;
-#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
-#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153
-
- u8 unused1100[0x01100-0x1080];
-
-/*0x01100*/ u64 kdfc_fifo_17_ctrl;
-#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-
- u8 unused1600[0x01600-0x1108];
-
-/*0x01600*/ u64 rxmac_int_status;
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
- vxge_mBIT(11)
-/*0x01608*/ u64 rxmac_int_mask;
- u8 unused01618[0x01618-0x01610];
-
-/*0x01618*/ u64 rxmac_gen_err_reg;
-/*0x01620*/ u64 rxmac_gen_err_mask;
-/*0x01628*/ u64 rxmac_gen_err_alarm;
-/*0x01630*/ u64 rxmac_ecc_err_reg;
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
- vxge_vBIT(val, 24, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
- vxge_vBIT(val, 26, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
- vxge_vBIT(val, 28, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
- vxge_vBIT(val, 30, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
- vxge_vBIT(val, 40, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
- vxge_vBIT(val, 47, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
- vxge_vBIT(val, 54, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
- vxge_vBIT(val, 57, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \
- vxge_mBIT(60)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \
- vxge_mBIT(61)
-/*0x01638*/ u64 rxmac_ecc_err_mask;
-/*0x01640*/ u64 rxmac_ecc_err_alarm;
-/*0x01648*/ u64 rxmac_various_err_reg;
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
-/*0x01650*/ u64 rxmac_various_err_mask;
-/*0x01658*/ u64 rxmac_various_err_alarm;
-/*0x01660*/ u64 rxmac_gen_cfg;
-#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
-/*0x01668*/ u64 rxmac_authorize_all_addr;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
-/*0x01670*/ u64 rxmac_authorize_all_vid;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
- u8 unused016c0[0x016c0-0x01678];
-
-/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
- u8 unused016e0[0x016e0-0x016c8];
-
-/*0x016e0*/ u64 rxmac_cfg0_port[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
- u8 unused01710[0x01710-0x016f8];
-
-/*0x01710*/ u64 rxmac_cfg2_port[3];
-#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
-/*0x01728*/ u64 rxmac_pause_cfg_port[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
- u8 unused01758[0x01758-0x01740];
-
-/*0x01758*/ u64 rxmac_red_cfg0_port[3];
-#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
-/*0x01770*/ u64 rxmac_red_cfg1_port[3];
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
-/*0x01788*/ u64 rxmac_red_cfg2_port[3];
-#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
-/*0x017a0*/ u64 rxmac_link_util_port[3];
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
- u8 unused017d0[0x017d0-0x017b8];
-
-/*0x017d0*/ u64 rxmac_status_port[3];
-#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
- u8 unused01800[0x01800-0x017e8];
-
-/*0x01800*/ u64 rxmac_rx_pa_cfg0;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x01808*/ u64 rxmac_rx_pa_cfg1;
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
- u8 unused01828[0x01828-0x01810];
-
-/*0x01828*/ u64 rts_mgr_cfg0;
-#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
-/*0x01830*/ u64 rts_mgr_cfg1;
-#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
-/*0x01838*/ u64 rts_mgr_criteria_priority;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
-/*0x01840*/ u64 rts_mgr_da_pause_cfg;
-#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
-#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01890[0x01890-0x01850];
-/*0x01890*/ u64 rts_mgr_cbasin_cfg;
- u8 unused01968[0x01968-0x01898];
-
-/*0x01968*/ u64 dbg_stat_rx_any_frms;
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused01a00[0x01a00-0x01970];
-
-/*0x01a00*/ u64 rxmac_red_rate_vp[17];
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
- u8 unused01e00[0x01e00-0x01a88];
-
-/*0x01e00*/ u64 xgmac_int_status;
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
- vxge_mBIT(7)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
-/*0x01e08*/ u64 xgmac_int_mask;
-/*0x01e10*/ u64 xmac_gen_err_reg;
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
- vxge_mBIT(11)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
- vxge_mBIT(23)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
- vxge_vBIT(val, 42, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
- vxge_vBIT(val, 44, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
- vxge_vBIT(val, 46, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
- vxge_vBIT(val, 48, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
- vxge_vBIT(val, 50, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
- vxge_vBIT(val, 52, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
- vxge_vBIT(val, 54, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
- vxge_vBIT(val, 56, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
- vxge_vBIT(val, 58, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
-/*0x01e18*/ u64 xmac_gen_err_mask;
-/*0x01e20*/ u64 xmac_gen_err_alarm;
-/*0x01e28*/ u64 xmac_link_err_port0_reg;
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
- vxge_mBIT(47)
-/*0x01e30*/ u64 xmac_link_err_port0_mask;
-/*0x01e38*/ u64 xmac_link_err_port0_alarm;
-/*0x01e40*/ u64 xmac_link_err_port1_reg;
-/*0x01e48*/ u64 xmac_link_err_port1_mask;
-/*0x01e50*/ u64 xmac_link_err_port1_alarm;
-/*0x01e58*/ u64 xgxs_gen_err_reg;
-#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
-/*0x01e60*/ u64 xgxs_gen_err_mask;
-/*0x01e68*/ u64 xgxs_gen_err_alarm;
-/*0x01e70*/ u64 asic_ntwk_err_reg;
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x01e78*/ u64 asic_ntwk_err_mask;
-/*0x01e80*/ u64 asic_ntwk_err_alarm;
-/*0x01e88*/ u64 asic_gpio_err_reg;
-#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
-/*0x01e90*/ u64 asic_gpio_err_mask;
-/*0x01e98*/ u64 asic_gpio_err_alarm;
-/*0x01ea0*/ u64 xgmac_gen_status;
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
-/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
-#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
-#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
- u8 unused01f40[0x01f40-0x01ed0];
-
-/*0x01f40*/ u64 xmac_gen_cfg;
-#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
-/*0x01f48*/ u64 xmac_timestamp;
-#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
-/*0x01f50*/ u64 xmac_stats_gen_cfg;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
-/*0x01f58*/ u64 xmac_stats_sys_cmd;
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x01f60*/ u64 xmac_stats_sys_data;
-#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused01f80[0x01f80-0x01f68];
-
-/*0x01f80*/ u64 asic_ntwk_ctrl;
-#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
-/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
-#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
-/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
-#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
-/*0x01f98*/ u64 xmac_cfg_port[3];
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
-/*0x01fb0*/ u64 xmac_station_addr_port[2];
-#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
- u8 unused02020[0x02020-0x01fc0];
-
-/*0x02020*/ u64 lag_cfg;
-#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
-#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
-#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
-/*0x02028*/ u64 lag_status;
-#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
-#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
- vxge_vBIT(val, 8, 8)
-/*0x02030*/ u64 lag_active_passive_cfg;
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
- vxge_vBIT(val, 32, 16)
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 lag_lacp_cfg;
-#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
-#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
-#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
-/*0x02048*/ u64 lag_timer_cfg_1;
-#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
-/*0x02050*/ u64 lag_timer_cfg_2;
-#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
-/*0x02058*/ u64 lag_sys_id;
-#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
-/*0x02060*/ u64 lag_sys_cfg;
-#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
- u8 unused02070[0x02070-0x02068];
-
-/*0x02070*/ u64 lag_aggr_addr_cfg[2];
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
-/*0x02080*/ u64 lag_aggr_id_cfg[2];
-#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
-/*0x02090*/ u64 lag_aggr_admin_key[2];
-#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020a0*/ u64 lag_aggr_alt_admin_key;
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
-/*0x020a8*/ u64 lag_aggr_oper_key[2];
-#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x020c8*/ u64 lag_aggr_partner_info[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
- vxge_vBIT(val, 16, 16)
-/*0x020d8*/ u64 lag_aggr_state[2];
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
- u8 unused020f0[0x020f0-0x020e8];
-
-/*0x020f0*/ u64 lag_port_cfg[2];
-#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
-/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
-/*0x02110*/ u64 lag_port_actor_admin_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x02140*/ u64 lag_port_partner_admin_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02150*/ u64 lag_port_to_aggr[2];
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
-/*0x02160*/ u64 lag_port_actor_oper_key[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x02170*/ u64 lag_port_actor_oper_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
- vxge_vBIT(val, 0, 48)
-/*0x02190*/ u64 lag_port_partner_oper_info[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x021a0*/ u64 lag_port_partner_oper_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
- vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x021b0*/ u64 lag_port_state_vars[2];
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
- vxge_mBIT(32)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
- vxge_mBIT(33)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
- vxge_vBIT(val, 41, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
- vxge_vBIT(val, 60, 4)
-/*0x021c0*/ u64 lag_port_timer_cntr[2];
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 40, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
- vxge_vBIT(val, 56, 8)
- u8 unused02208[0x02700-0x021d0];
-
-/*0x02700*/ u64 rtdma_int_status;
-#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
-#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
-#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
-#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
-/*0x02708*/ u64 rtdma_int_mask;
-/*0x02710*/ u64 pda_alarm_reg;
-#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
-/*0x02718*/ u64 pda_alarm_mask;
-/*0x02720*/ u64 pda_alarm_alarm;
-/*0x02728*/ u64 pcc_error_reg;
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
-/*0x02730*/ u64 pcc_error_mask;
-/*0x02738*/ u64 pcc_error_alarm;
-/*0x02740*/ u64 lso_error_reg;
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
-/*0x02748*/ u64 lso_error_mask;
-/*0x02750*/ u64 lso_error_alarm;
-/*0x02758*/ u64 sm_error_reg;
-#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
-/*0x02760*/ u64 sm_error_mask;
-/*0x02768*/ u64 sm_error_alarm;
-
- u8 unused027a8[0x027a8-0x02770];
-
-/*0x027a8*/ u64 txd_ownership_ctrl;
-#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
-/*0x027b0*/ u64 pcc_cfg;
-#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
-/*0x027b8*/ u64 pcc_control;
-#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
-#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
-/*0x027c0*/ u64 pda_status1;
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
-/*0x027c8*/ u64 rtdma_bw_timer;
-#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)
-
- u8 unused02900[0x02900-0x027d0];
-/*0x02900*/ u64 g3cmct_int_status;
-#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x02908*/ u64 g3cmct_int_mask;
-/*0x02910*/ u64 g3cmct_err_reg;
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x02918*/ u64 g3cmct_err_mask;
-/*0x02920*/ u64 g3cmct_err_alarm;
- u8 unused03000[0x03000-0x02928];
-
-/*0x03000*/ u64 mc_int_status;
-#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
-#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
-#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
-#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
-/*0x03008*/ u64 mc_int_mask;
-/*0x03010*/ u64 mc_err_reg;
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
-#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
-/*0x03018*/ u64 mc_err_mask;
-/*0x03020*/ u64 mc_err_alarm;
-/*0x03028*/ u64 grocrc_alarm_reg;
-#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
-/*0x03030*/ u64 grocrc_alarm_mask;
-/*0x03038*/ u64 grocrc_alarm_alarm;
- u8 unused03100[0x03100-0x03040];
-
-/*0x03100*/ u64 rx_thresh_cfg_repl;
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
-#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
- u8 unused033b8[0x033b8-0x03108];
-
-/*0x033b8*/ u64 fbmc_ecc_cfg;
-#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
- u8 unused03400[0x03400-0x033c0];
-
-/*0x03400*/ u64 pcipif_int_status;
-#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
-#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
-#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
-#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
-#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
- vxge_mBIT(19)
-/*0x03408*/ u64 pcipif_int_mask;
-/*0x03410*/ u64 dbecc_err_reg;
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
-/*0x03418*/ u64 dbecc_err_mask;
-/*0x03420*/ u64 dbecc_err_alarm;
-/*0x03428*/ u64 sbecc_err_reg;
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
-/*0x03430*/ u64 sbecc_err_mask;
-/*0x03438*/ u64 sbecc_err_alarm;
-/*0x03440*/ u64 general_err_reg;
-#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
-/*0x03448*/ u64 general_err_mask;
-/*0x03450*/ u64 general_err_alarm;
-/*0x03458*/ u64 srpcim_msg_reg;
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
- vxge_mBIT(0)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
- vxge_mBIT(1)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
- vxge_mBIT(2)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
- vxge_mBIT(3)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
- vxge_mBIT(4)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
- vxge_mBIT(5)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
- vxge_mBIT(6)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
- vxge_mBIT(7)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
- vxge_mBIT(8)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
- vxge_mBIT(9)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
- vxge_mBIT(10)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
- vxge_mBIT(11)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
- vxge_mBIT(12)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
- vxge_mBIT(13)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
- vxge_mBIT(14)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
- vxge_mBIT(15)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
- vxge_mBIT(16)
-/*0x03460*/ u64 srpcim_msg_mask;
-/*0x03468*/ u64 srpcim_msg_alarm;
- u8 unused03600[0x03600-0x03470];
-
-/*0x03600*/ u64 gcmg1_int_status;
-#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
-#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
-/*0x03608*/ u64 gcmg1_int_mask;
- u8 unused03a00[0x03a00-0x03610];
-
-/*0x03a00*/ u64 pcmg1_int_status;
-#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
-/*0x03a08*/ u64 pcmg1_int_mask;
- u8 unused04000[0x04000-0x03a10];
-
-/*0x04000*/ u64 one_int_status;
-#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
- vxge_mBIT(13)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
- vxge_mBIT(14)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
-#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
-#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
-#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
-#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
-#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
-/*0x04008*/ u64 one_int_mask;
- u8 unused04818[0x04818-0x04010];
-
-/*0x04818*/ u64 noa_wct_ctrl;
-#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
-/*0x04820*/ u64 rc_cfg2;
-#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
-/*0x04828*/ u64 rc_cfg3;
-#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
-/*0x04830*/ u64 rx_multi_cast_ctrl1;
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
-/*0x04838*/ u64 rxdm_dbg_rd;
-#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
-/*0x04840*/ u64 rxdm_dbg_rd_data;
-#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x04848*/ u64 rqa_top_prty_for_vh[17];
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused04900[0x04900-0x048d0];
-
-/*0x04900*/ u64 tim_status;
-#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
-/*0x04908*/ u64 tim_ecc_enable;
-#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
-/*0x04910*/ u64 tim_bp_ctrl;
-#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
-#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
-#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
-/*0x04918*/ u64 tim_resource_assignment_vh[17];
-#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
-#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
- u8 unused04b00[0x04b00-0x04a28];
-
-/*0x04b00*/ u64 gcmg2_int_status;
-#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
-#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
-/*0x04b08*/ u64 gcmg2_int_mask;
-/*0x04b10*/ u64 gxtmc_err_reg;
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
- vxge_mBIT(21)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
- vxge_mBIT(22)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
- vxge_mBIT(24)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
- vxge_mBIT(25)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
-/*0x04b18*/ u64 gxtmc_err_mask;
-/*0x04b20*/ u64 gxtmc_err_alarm;
-/*0x04b28*/ u64 cmc_err_reg;
-#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
-/*0x04b30*/ u64 cmc_err_mask;
-/*0x04b38*/ u64 cmc_err_alarm;
-/*0x04b40*/ u64 gcp_err_reg;
-#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
-#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
-/*0x04b48*/ u64 gcp_err_mask;
-/*0x04b50*/ u64 gcp_err_alarm;
- u8 unused04f00[0x04f00-0x04b58];
-
-/*0x04f00*/ u64 pcmg2_int_status;
-#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
-/*0x04f08*/ u64 pcmg2_int_mask;
-/*0x04f10*/ u64 pxtmc_err_reg;
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
-/*0x04f18*/ u64 pxtmc_err_mask;
-/*0x04f20*/ u64 pxtmc_err_alarm;
-/*0x04f28*/ u64 cp_err_reg;
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
-#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
-#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
-#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
-#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
-#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
-#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
-#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
-#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
-#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
-/*0x04f30*/ u64 cp_err_mask;
-/*0x04f38*/ u64 cp_err_alarm;
- u8 unused04fe8[0x04f50-0x04f40];
-
-/*0x04f50*/ u64 cp_exc_reg;
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
-/*0x04f58*/ u64 cp_exc_mask;
-/*0x04f60*/ u64 cp_exc_alarm;
-/*0x04f68*/ u64 cp_exc_cause;
-#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
- u8 unused05200[0x05200-0x04f70];
-
-/*0x05200*/ u64 msg_int_status;
-#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
-#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
-/*0x05208*/ u64 msg_int_mask;
-/*0x05210*/ u64 tim_err_reg;
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
-#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
-/*0x05218*/ u64 tim_err_mask;
-/*0x05220*/ u64 tim_err_alarm;
-/*0x05228*/ u64 msg_err_reg;
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
- vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
-/*0x05230*/ u64 msg_err_mask;
-/*0x05238*/ u64 msg_err_alarm;
- u8 unused05340[0x05340-0x05240];
-
-/*0x05340*/ u64 msg_exc_reg;
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
-/*0x05348*/ u64 msg_exc_mask;
-/*0x05350*/ u64 msg_exc_alarm;
-/*0x05358*/ u64 msg_exc_cause;
-#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
- u8 unused05368[0x05380-0x05360];
-
-/*0x05380*/ u64 msg_err2_reg;
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
- vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
- vxge_mBIT(13)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
- vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
- vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
- vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
- vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
- vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
- vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
- vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
- vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
- vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
- vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
- vxge_mBIT(28)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(30)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
-/*0x05388*/ u64 msg_err2_mask;
-/*0x05390*/ u64 msg_err2_alarm;
-/*0x05398*/ u64 msg_err3_reg;
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
-/*0x053a0*/ u64 msg_err3_mask;
-/*0x053a8*/ u64 msg_err3_alarm;
- u8 unused05600[0x05600-0x053b0];
-
-/*0x05600*/ u64 fau_gen_err_reg;
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
-#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
-/*0x05608*/ u64 fau_gen_err_mask;
-/*0x05610*/ u64 fau_gen_err_alarm;
-/*0x05618*/ u64 fau_ecc_err_reg;
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 4, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 8, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 14, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 16, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
- vxge_vBIT(val, 18, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
- vxge_vBIT(val, 20, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
-/*0x05620*/ u64 fau_ecc_err_mask;
-/*0x05628*/ u64 fau_ecc_err_alarm;
- u8 unused05658[0x05658-0x05630];
-/*0x05658*/ u64 fau_pa_cfg;
-#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
- u8 unused05668[0x05668-0x05660];
-
-/*0x05668*/ u64 dbg_stats_fau_rx_path;
-#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused056c0[0x056c0-0x05670];
-
-/*0x056c0*/ u64 fau_lag_cfg;
-#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
- u8 unused05800[0x05800-0x056c8];
-
-/*0x05800*/ u64 tpa_int_status;
-#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
-#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
-#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
-/*0x05808*/ u64 tpa_int_mask;
-/*0x05810*/ u64 orp_err_reg;
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
-#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
-#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
-/*0x05818*/ u64 orp_err_mask;
-/*0x05820*/ u64 orp_err_alarm;
-/*0x05828*/ u64 ptm_alarm_reg;
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
-/*0x05830*/ u64 ptm_alarm_mask;
-/*0x05838*/ u64 ptm_alarm_alarm;
-/*0x05840*/ u64 tpa_error_reg;
-#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
-/*0x05848*/ u64 tpa_error_mask;
-/*0x05850*/ u64 tpa_error_alarm;
-/*0x05858*/ u64 tpa_global_cfg;
-#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
- u8 unused05868[0x05870-0x05860];
-
-/*0x05870*/ u64 ptm_ecc_cfg;
-#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
-/*0x05878*/ u64 ptm_phase_cfg;
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
- u8 unused05898[0x05898-0x05880];
-
-/*0x05898*/ u64 dbg_stats_tpa_tx_path;
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused05900[0x05900-0x058a0];
-
-/*0x05900*/ u64 tmac_int_status;
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
-/*0x05908*/ u64 tmac_int_mask;
-/*0x05910*/ u64 txmac_gen_err_reg;
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
-/*0x05918*/ u64 txmac_gen_err_mask;
-/*0x05920*/ u64 txmac_gen_err_alarm;
-/*0x05928*/ u64 txmac_ecc_err_reg;
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
-/*0x05930*/ u64 txmac_ecc_err_mask;
-/*0x05938*/ u64 txmac_ecc_err_alarm;
- u8 unused05978[0x05978-0x05940];
-
-/*0x05978*/ u64 dbg_stat_tx_any_frms;
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused059a0[0x059a0-0x05980];
-
-/*0x059a0*/ u64 txmac_link_util_port[3];
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
-/*0x059b8*/ u64 txmac_cfg0_port[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
-/*0x059d0*/ u64 txmac_cfg1_port[3];
-#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
-/*0x059e8*/ u64 txmac_status_port[3];
-#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
- u8 unused05a20[0x05a20-0x05a00];
-
-/*0x05a20*/ u64 lag_distrib_dest;
-#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
-/*0x05a28*/ u64 lag_marker_cfg;
-#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
-/*0x05a30*/ u64 lag_tx_cfg;
-#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
-#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
-/*0x05a38*/ u64 lag_tx_status;
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused05d48[0x05d48-0x05a40];
-
-/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
-#define \
-VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
- vxge_vBIT(val, 0, 64)
- u8 unused06420[0x06420-0x05dd0];
-
-/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
-#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];
-
-/*0x06530*/ u64 debug_stats0;
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
-/*0x06538*/ u64 debug_stats1;
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
-/*0x06540*/ u64 debug_stats2;
-#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
-/*0x06548*/ u64 debug_stats3_vplane[17];
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
-/*0x065d0*/ u64 debug_stats4_vplane[17];
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)
-
- u8 unused07000[0x07000-0x06658];
-
-/*0x07000*/ u64 mrpcim_general_int_status;
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
-/*0x07008*/ u64 mrpcim_general_int_mask;
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
-/*0x07010*/ u64 mrpcim_ppif_int_status;
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
- vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
- vxge_mBIT(32)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
- vxge_mBIT(33)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
- vxge_mBIT(34)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
- vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
- vxge_mBIT(36)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
- vxge_mBIT(37)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
- vxge_mBIT(38)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
- vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
- vxge_mBIT(40)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
- vxge_mBIT(41)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
- vxge_mBIT(42)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
- vxge_mBIT(43)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
- vxge_mBIT(44)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
- vxge_mBIT(45)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
- vxge_mBIT(46)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
- vxge_mBIT(47)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
- vxge_mBIT(55)
-/*0x07018*/ u64 mrpcim_ppif_int_mask;
- u8 unused07028[0x07028-0x07020];
-
-/*0x07028*/ u64 ini_errors_reg;
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
-#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
-#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
-#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
-#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
-/*0x07030*/ u64 ini_errors_mask;
-/*0x07038*/ u64 ini_errors_alarm;
-/*0x07040*/ u64 dma_errors_reg;
-#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
-/*0x07048*/ u64 dma_errors_mask;
-/*0x07050*/ u64 dma_errors_alarm;
-/*0x07058*/ u64 tgt_errors_reg;
-#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
-/*0x07060*/ u64 tgt_errors_mask;
-/*0x07068*/ u64 tgt_errors_alarm;
-/*0x07070*/ u64 config_errors_reg;
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
-/*0x07078*/ u64 config_errors_mask;
-/*0x07080*/ u64 config_errors_alarm;
- u8 unused07090[0x07090-0x07088];
-
-/*0x07090*/ u64 crdt_errors_reg;
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
- vxge_mBIT(47)
-/*0x07098*/ u64 crdt_errors_mask;
-/*0x070a0*/ u64 crdt_errors_alarm;
- u8 unused070b0[0x070b0-0x070a8];
-
-/*0x070b0*/ u64 mrpcim_general_errors_reg;
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
- vxge_mBIT(47)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
-/*0x070b8*/ u64 mrpcim_general_errors_mask;
-/*0x070c0*/ u64 mrpcim_general_errors_alarm;
- u8 unused070d0[0x070d0-0x070c8];
-
-/*0x070d0*/ u64 pll_errors_reg;
-#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
-/*0x070d8*/ u64 pll_errors_mask;
-/*0x070e0*/ u64 pll_errors_alarm;
-/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
-/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
-/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
-#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
-/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
- u8 unused07128[0x07128-0x07118];
-
-/*0x07128*/ u64 crdt_errors_vplane_reg[17];
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
- vxge_mBIT(7)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
- vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
- vxge_mBIT(31)
-/*0x07130*/ u64 crdt_errors_vplane_mask[17];
-/*0x07138*/ u64 crdt_errors_vplane_alarm[17];
- u8 unused072f0[0x072f0-0x072c0];
-
-/*0x072f0*/ u64 mrpcim_rst_in_prog;
-#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
-/*0x072f8*/ u64 mrpcim_reg_modified;
-#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)
-
- u8 unused07378[0x07378-0x07300];
-
-/*0x07378*/ u64 write_arb_pending;
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
-/*0x07380*/ u64 read_arb_pending;
-#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
-/*0x07388*/ u64 dmaif_dmadbl_pending;
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
- vxge_vBIT(val, 13, 51)
-/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
- vxge_vBIT(val, 0, 8)
-/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
- vxge_vBIT(val, 4, 12)
- u8 unused07500[0x07500-0x074a0];
-
-/*0x07500*/ u64 mrpcim_general_cfg1;
-#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
-/*0x07508*/ u64 mrpcim_general_cfg2;
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
- vxge_vBIT(val, 47, 5)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
-/*0x07510*/ u64 mrpcim_general_cfg3;
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
- vxge_vBIT(val, 36, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
-/*0x07518*/ u64 mrpcim_stats_start_host_addr;
-#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
- vxge_vBIT(val, 0, 57)
-
- u8 unused07950[0x07950-0x07520];
-
-/*0x07950*/ u64 rdcrdtarb_cfg0;
-#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 26, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 34, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
-#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
- u8 unused07be8[0x07be8-0x07958];
-
-/*0x07be8*/ u64 bf_sw_reset;
-#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x07bf0*/ u64 sw_reset_status;
-#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
-#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
- u8 unused07d30[0x07d30-0x07bf8];
-
-/*0x07d30*/ u64 mrpcim_debug_stats0;
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
-/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed0*/ u64 mrpcim_debug_stats4;
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed8*/ u64 genstats_count01;
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
-/*0x07ee0*/ u64 genstats_count23;
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
-/*0x07ee8*/ u64 genstats_count4;
-#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
-/*0x07ef0*/ u64 genstats_count5;
-#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)
-
- u8 unused07f08[0x07f08-0x07ef8];
-
-/*0x07f08*/ u64 genstats_cfg[6];
-#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
-/*0x07f38*/ u64 genstat_64bit_cfg;
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
- u8 unused08000[0x08000-0x07f40];
-/*0x08000*/ u64 gcmg3_int_status;
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
-/*0x08008*/ u64 gcmg3_int_mask;
- u8 unused09000[0x09000-0x08010];
-
-/*0x09000*/ u64 g3ifcmd_fb_int_status;
-#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09008*/ u64 g3ifcmd_fb_int_mask;
-/*0x09010*/ u64 g3ifcmd_fb_err_reg;
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09018*/ u64 g3ifcmd_fb_err_mask;
-/*0x09020*/ u64 g3ifcmd_fb_err_alarm;
-
- u8 unused09400[0x09400-0x09028];
-
-/*0x09400*/ u64 g3ifcmd_cmu_int_status;
-#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
-/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
-/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;
-
- u8 unused09800[0x09800-0x09428];
-
-/*0x09800*/ u64 g3ifcmd_cml_int_status;
-#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09808*/ u64 g3ifcmd_cml_int_mask;
-/*0x09810*/ u64 g3ifcmd_cml_err_reg;
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09818*/ u64 g3ifcmd_cml_err_mask;
-/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
- u8 unused09b00[0x09b00-0x09828];
-
-/*0x09b00*/ u64 vpath_to_vplane_map[17];
-#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
- vxge_vBIT(val, 3, 5)
- u8 unused09c30[0x09c30-0x09b88];
-
-/*0x09c30*/ u64 xgxs_cfg_port[2];
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
-/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
- vxge_vBIT(val, 16, 48)
-/*0x09c50*/ u64 xgxs_rxber_status_port[2];
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
- vxge_vBIT(val, 48, 16)
-/*0x09c60*/ u64 xgxs_status_port[2];
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
- vxge_vBIT(val, 36, 4)
-/*0x09c70*/ u64 xgxs_pma_reset_port[2];
-#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
- u8 unused09c90[0x09c90-0x09c80];
-
-/*0x09c90*/ u64 xgxs_static_cfg_port[2];
-#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
- u8 unused09d40[0x09d40-0x09ca0];
-
-/*0x09d40*/ u64 xgxs_info_port[2];
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
-/*0x09d50*/ u64 ratemgmt_cfg_port[2];
-#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
-/*0x09d60*/ u64 ratemgmt_status_port[2];
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
- u8 unused09d80[0x09d80-0x09d70];
-
-/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
-#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
-/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
- vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
-/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
- vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
- vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
-/*0x09db0*/ u64 anbe_cfg_port[2];
-#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
-#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
-/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused09de0[0x09de0-0x09dd0];
-
-/*0x09de0*/ u64 anbe_fw_mstr_port[2];
-#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
-#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
-/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
- vxge_mBIT(3)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
- vxge_mBIT(7)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
- vxge_mBIT(11)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
- vxge_mBIT(15)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
- vxge_mBIT(27)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
- vxge_mBIT(35)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
- vxge_mBIT(39)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
- vxge_mBIT(43)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
- vxge_mBIT(47)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
- vxge_mBIT(51)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
- vxge_mBIT(55)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
- vxge_vBIT(val, 60, 4)
-/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
- vxge_mBIT(32)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
- vxge_mBIT(33)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
- vxge_mBIT(40)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
- vxge_mBIT(41)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
- vxge_mBIT(42)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
- vxge_mBIT(50)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
- vxge_vBIT(val, 54, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 59, 5)
-/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused09e30[0x09e30-0x09e20];
-
-/*0x09e30*/ u64 antp_gen_cfg_port[2];
-/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
- vxge_vBIT(val, 10, 6)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
- vxge_mBIT(23)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
- vxge_mBIT(27)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
- vxge_mBIT(35)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
- vxge_mBIT(43)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
- vxge_mBIT(51)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
- vxge_mBIT(59)
-/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
- vxge_vBIT(val, 4, 7)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 11, 5)
-/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
- vxge_vBIT(val, 5, 11)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
- vxge_vBIT(val, 32, 16)
-/*0x09e70*/ u64 mdio_mgr_access_port[2];
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE vxge_mBIT(3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
- u8 unused0a200[0x0a200-0x09e80];
-/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused0a400[0x0a400-0x0a288];
-
-/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
- u8 unused0ac90[0x0ac90-0x0a488];
-} __packed;
-
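
Every field macro in these register maps is built on the driver's bit helpers, which count bit positions from the most significant end of the 64-bit register (MSB-0) rather than from bit 0. A minimal sketch of those helpers, consistent with how vxge_mBIT()/vxge_vBIT()/vxge_bVALn() are used above (the exact definitions live elsewhere in the driver's headers, outside this hunk):

/* Sketch of the vxge bit helpers, assuming the MSB-0 numbering used above. */
#define vxge_mBIT(loc)		(0x8000000000000000ULL >> (loc))
#define vxge_vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))
#define vxge_bVALn(bits, loc, n) \
	((((u64)(bits)) >> (64 - (loc) - (n))) & ((0x1ULL << (n)) - 1))

Under this convention vxge_mBIT(7) is bit 56 in LSB terms, and vxge_vBIT(val, 48, 16) lands val in the low 16 bits of the word, which is why the genstats_count* macros above pack one 32-bit counter into each half of a 64-bit register.
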
-/*VXGE_HW_SRPCIM_REGS_H*/
-struct vxge_hw_srpcim_reg {
-
-/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
-#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
- vxge_vBIT(val, 0, 32)
- u8 unused00100[0x00100-0x00008];
-
-/*0x00100*/ u64 srpcim_pcipif_int_status;
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
- BIT(11)
-/*0x00108*/ u64 srpcim_pcipif_int_mask;
-/*0x00110*/ u64 mrpcim_msg_reg;
-#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
-/*0x00118*/ u64 mrpcim_msg_mask;
-/*0x00120*/ u64 mrpcim_msg_alarm;
-/*0x00128*/ u64 vpath_msg_reg;
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
-/*0x00130*/ u64 vpath_msg_mask;
-/*0x00138*/ u64 vpath_msg_alarm;
- u8 unused00160[0x00160-0x00140];
-
-/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
-/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
- vxge_vBIT(val, 0, 5)
-/*0x00180*/ u64 vpath_to_srpcim_rmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
- u8 unused00200[0x00200-0x00188];
-
-/*0x00200*/ u64 srpcim_general_int_status;
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
- u8 unused00210[0x00210-0x00208];
-
-/*0x00210*/ u64 srpcim_general_int_mask;
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
- u8 unused00220[0x00220-0x00218];
-
-/*0x00220*/ u64 srpcim_ppif_int_status;
-
-/*0x00228*/ u64 srpcim_ppif_int_mask;
-/*0x00230*/ u64 srpcim_gen_errors_reg;
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
-/*0x00238*/ u64 srpcim_gen_errors_mask;
-/*0x00240*/ u64 srpcim_gen_errors_alarm;
-/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
-/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
-/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
-/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;
-
-/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
-/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
- u8 unused00280[0x00280-0x00278];
-
-/*0x00280*/ u64 pf_sw_reset;
-#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x00288*/ u64 srpcim_general_cfg1;
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
-/*0x00290*/ u64 srpcim_interrupt_cfg1;
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
- u8 unused002a8[0x002a8-0x00298];
-
-/*0x002a8*/ u64 srpcim_clear_msix_mask;
-#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
-/*0x002b0*/ u64 srpcim_set_msix_mask;
-#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
-/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
-#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
-/*0x002c0*/ u64 srpcim_rst_in_prog;
-#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
-/*0x002c8*/ u64 srpcim_reg_modified;
-#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
-/*0x002d0*/ u64 tgt_pf_illegal_access;
-#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
-/*0x002d8*/ u64 srpcim_msix_status;
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
- u8 unused00880[0x00880-0x002e0];
-
-/*0x00880*/ u64 xgmac_sr_int_status;
-#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
-/*0x00888*/ u64 xgmac_sr_int_mask;
-/*0x00890*/ u64 asic_ntwk_sr_err_reg;
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
- BIT(11)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
-/*0x00898*/ u64 asic_ntwk_sr_err_mask;
-/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
- u8 unused008c0[0x008c0-0x008a8];
-
-/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused00900[0x00900-0x008c8];
-
-/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
-#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00908*/ u64 umq_vh_data_list_empty;
-#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
- BIT(0)
-/*0x00910*/ u64 wde_cfg;
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
-#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
-#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
-#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
-#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
-#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
-#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
-#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
-#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)
-
-} __packed;
-
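
Nearly every error source in the blocks above follows the same triple: a latched *_reg, a *_mask that gates it out of the summary interrupt, and an *_alarm view. A hedged sketch of how one such triple might be serviced, assuming the usual write-one-to-clear semantics for the latched register and using the kernel's readq/writeq accessors (the function name and handling are illustrative, not taken from the driver):

/* Illustrative only: acknowledge one reg/mask/alarm triple. */
static void example_ack_srpcim_gen_errors(struct vxge_hw_srpcim_reg __iomem *reg)
{
	u64 val = readq(&reg->srpcim_gen_errors_reg);

	if (val & VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET) {
		/* react to the system-error indication here */
	}

	writeq(val, &reg->srpcim_gen_errors_reg);	/* assumed W1C ack */
}
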
-/*VXGE_HW_VPMGMT_REGS_H*/
-struct vxge_hw_vpmgmt_reg {
-
- u8 unused00040[0x00040-0x00000];
-
-/*0x00040*/ u64 vpath_to_func_map_cfg1;
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
- vxge_vBIT(val, 3, 5)
-/*0x00048*/ u64 vpath_is_first;
-#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
-/*0x00050*/ u64 srpcim_to_vpath_wmsg;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused00100[0x00100-0x00060];
-
-/*0x00100*/ u64 tim_vpath_assignment;
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
- u8 unused00140[0x00140-0x00108];
-
-/*0x00140*/ u64 rqa_top_prty_for_vp;
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused001c0[0x001c0-0x00148];
-
-/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
- vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
- vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
- vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
- vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
-/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
- vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
- vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
- vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
- vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
- vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
- vxge_vBIT(val, 37, 3)
-/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
- vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
- vxge_vBIT(val, 50, 14)
-/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
- vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
- vxge_mBIT(59)
- u8 unused00240[0x00240-0x00208];
-
-/*0x00240*/ u64 xmac_vsport_choices_vp;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused00260[0x00260-0x00248];
-
-/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
- vxge_mBIT(11)
-/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
- vxge_mBIT(3)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
-/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
- vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
- vxge_vBIT(val, 32, 4)
-/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
- vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
- vxge_vBIT(val, 32, 16)
-/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
-/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
- u8 unused002c0[0x002c0-0x002a8];
-
-/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
-#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
-/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
- u8 unused00300[0x00300-0x002e0];
-
-/*0x00300*/ u64 wol_mp_crc;
-#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
-/*0x00308*/ u64 wol_mp_mask_a;
-#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x00310*/ u64 wol_mp_mask_b;
-#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
- u8 unused00360[0x00360-0x00318];
-
-/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
-/*0x00368*/ u64 rx_datapath_util_vp_clone;
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
- u8 unused00380[0x00380-0x00370];
-
-/*0x00380*/ u64 tx_datapath_util_vp_clone;
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
-
-} __packed;
-
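
The per-vpath block below pairs several setter macros with GET_ accessors built on vxge_bVALn (for example VXGE_HW_PRC_CFG6_RXD_SPAT and VXGE_HW_PRC_CFG6_GET_RXD_SPAT). A worked round trip under the helper definitions sketched earlier:

/* Illustrative round trip: compose a field, then extract it again. */
static u64 example_rxd_spat_roundtrip(void)
{
	u64 cfg = VXGE_HW_PRC_CFG6_RXD_SPAT(0x40);	/* 0x40 << (64 - 36 - 9) */

	return VXGE_HW_PRC_CFG6_GET_RXD_SPAT(cfg);	/* yields 0x40 again */
}
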
-struct vxge_hw_vpath_reg {
-
- u8 unused00300[0x00300];
-
-/*0x00300*/ u64 usdc_vpath;
-#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
- u8 unused00a00[0x00a00-0x00308];
-
-/*0x00a00*/ u64 wrdma_alarm_status;
-#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
-/*0x00a08*/ u64 wrdma_alarm_mask;
- u8 unused00a30[0x00a30-0x00a10];
-
-/*0x00a30*/ u64 prc_alarm_reg;
-#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
-#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
-/*0x00a38*/ u64 prc_alarm_mask;
-/*0x00a40*/ u64 prc_alarm_alarm;
-/*0x00a48*/ u64 prc_cfg1;
-#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
-#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
-#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
-#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
- u8 unused00a60[0x00a60-0x00a50];
-
-/*0x00a60*/ u64 prc_cfg4;
-#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
-#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
-#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
-#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
-#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
-#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
-/*0x00a68*/ u64 prc_cfg5;
-#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
-/*0x00a70*/ u64 prc_cfg6;
-#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
-#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
-#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
-#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
-#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
-#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
-#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
-#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
-/*0x00a78*/ u64 prc_cfg7;
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
-#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
-#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
-#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
-/*0x00a80*/ u64 tim_dest_addr;
-#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
-/*0x00a88*/ u64 prc_rxd_doorbell;
-#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
-/*0x00a90*/ u64 rqa_prty_for_vp;
-#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
-/*0x00a98*/ u64 rxdmem_size;
-#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
-/*0x00aa0*/ u64 frm_in_progress_cnt;
-#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00aa8*/ u64 rx_multi_cast_stats;
-#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
-/*0x00ab0*/ u64 rx_frm_transferred;
-#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x00ab8*/ u64 rxd_returned;
-#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
- u8 unused00c00[0x00c00-0x00ac0];
-
-/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
-/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
-#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
-/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
-/*0x00c48*/ u64 kdfc_drbl_triplet_total;
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
- vxge_vBIT(val, 17, 15)
- u8 unused00c60[0x00c60-0x00c50];
-
-/*0x00c60*/ u64 usdc_drbl_ctrl;
-#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
-/*0x00c68*/ u64 usdc_vp_ready;
-#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
-#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
-#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
-/*0x00c70*/ u64 kdfc_status;
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
- u8 unused00c80[0x00c80-0x00c78];
-
-/*0x00c80*/ u64 xmac_rpa_vcfg;
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
-/*0x00c88*/ u64 rxmac_vcfg0;
-#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
-#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
-#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
-#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
-#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
-/*0x00c90*/ u64 rxmac_vcfg1;
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
-/*0x00c98*/ u64 rts_access_steer_ctrl;
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
-/*0x00ca0*/ u64 rts_access_steer_data0;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00ca8*/ u64 rts_access_steer_data1;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused00d00[0x00d00-0x00cb0];
-
-/*0x00d00*/ u64 xmac_vsport_choice;
-#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
-/*0x00d08*/ u64 xmac_stats_cfg;
-/*0x00d10*/ u64 xmac_stats_access_cmd;
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x00d18*/ u64 xmac_stats_access_data;
-#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
- u8 unused00d30[0x00d30-0x00d28];
-
-/*0x00d30*/ u64 xgmac_vp_int_status;
-#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
- vxge_mBIT(3)
-/*0x00d38*/ u64 xgmac_vp_int_mask;
-/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
- vxge_mBIT(11)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
- vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
-/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
- u8 unused00d80[0x00d80-0x00d58];
-
-/*0x00d80*/ u64 rtdma_bw_ctrl;
-#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
-#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
-/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
-#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
- vxge_vBIT(val, 37, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
- vxge_vBIT(val, 61, 3)
-/*0x00d90*/ u64 pda_pcc_job_monitor;
-#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
-/*0x00d98*/ u64 tx_protocol_assist_cfg;
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
- u8 unused01000[0x01000-0x00da0];
-
-/*0x01000*/ u64 tim_cfg1_int_num[4];
-#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
-/*0x01020*/ u64 tim_cfg2_int_num[4];
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
-/*0x01040*/ u64 tim_cfg3_int_num[4];
-#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
-#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
-/*0x01060*/ u64 tim_wrkld_clc;
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
-#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
-/*0x01068*/ u64 tim_bitmap;
-#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
-/*0x01070*/ u64 tim_ring_assn;
-#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 tim_remap;
-#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
-#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
-#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
-#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
-/*0x01080*/ u64 tim_vpath_map;
-#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x01088*/ u64 tim_pci_cfg;
-#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
-#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
-#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
-#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
- u8 unused01100[0x01100-0x01090];
-
-/*0x01100*/ u64 sgrp_assign;
-#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
-/*0x01108*/ u64 sgrp_aoa_and_result;
-#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01110*/ u64 rpe_pci_cfg;
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
-#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
-#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
-/*0x01118*/ u64 rpe_lro_cfg;
-#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
-#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
-/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
-#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
-/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 32, 32)
-/*0x01130*/ u64 txpe_pci_nce_cfg;
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
- u8 unused01180[0x01180-0x01138];
-
-/*0x01180*/ u64 msg_qpad_en_cfg;
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
-#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
-/*0x01188*/ u64 msg_pci_cfg;
-#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
-#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
-#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
-#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
-/*0x01190*/ u64 umqdmq_ir_init;
-#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x01198*/ u64 dmq_ir_int;
-#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011a0*/ u64 dmq_bwr_init_add;
-#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011a8*/ u64 dmq_bwr_init_byte;
-#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011b0*/ u64 dmq_ir;
-#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
-/*0x011b8*/ u64 umq_int;
-#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
-#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
-/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
-#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
-/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
-#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
-/*0x011d8*/ u64 umq_bwr_init_add;
-#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011e0*/ u64 umq_bwr_init_byte;
-#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011e8*/ u64 gendma_int;
-/*0x011f0*/ u64 umqdmq_ir_init_notify;
-#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x011f8*/ u64 dmq_init_notify;
-#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x01200*/ u64 umq_init_notify;
-#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
- u8 unused01380[0x01380-0x01208];
-
-/*0x01380*/ u64 tpa_cfg;
-#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
-#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
-#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
- u8 unused01400[0x01400-0x01388];
-
-/*0x01400*/ u64 tx_vp_reset_discarded_frms;
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 48, 16)
- u8 unused01480[0x01480-0x01408];
-
-/*0x01480*/ u64 fau_rpa_vcfg;
-#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
-#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
-#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
- u8 unused014d0[0x014d0-0x01488];
-
-/*0x014d0*/ u64 dbg_stats_rx_mpa;
-#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
-/*0x014d8*/ u64 dbg_stats_rx_fau;
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused014f0[0x014f0-0x014e0];
-
-/*0x014f0*/ u64 fbmc_vp_rdy;
-#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
- u8 unused01e00[0x01e00-0x014f8];
-
-/*0x01e00*/ u64 vpath_pcipif_int_status;
-#define \
-VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
- vxge_mBIT(7)
-/*0x01e08*/ u64 vpath_pcipif_int_mask;
- u8 unused01e20[0x01e20-0x01e10];
-
-/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
-#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
- vxge_mBIT(3)
-/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
-/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
- u8 unused01ea0[0x01ea0-0x01e38];
-
-/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused02000[0x02000-0x01eb0];
-
-/*0x02000*/ u64 vpath_general_int_status;
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
-/*0x02008*/ u64 vpath_general_int_mask;
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
-/*0x02010*/ u64 vpath_ppif_int_status;
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
- vxge_mBIT(7)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
- vxge_mBIT(11)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(15)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(19)
-/*0x02018*/ u64 vpath_ppif_int_mask;
-/*0x02020*/ u64 kdfcctl_errors_reg;
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
-/*0x02028*/ u64 kdfcctl_errors_mask;
-/*0x02030*/ u64 kdfcctl_errors_alarm;
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 general_errors_reg;
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
-#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
-/*0x02048*/ u64 general_errors_mask;
-/*0x02050*/ u64 general_errors_alarm;
-/*0x02058*/ u64 pci_config_errors_reg;
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
-/*0x02060*/ u64 pci_config_errors_mask;
-/*0x02068*/ u64 pci_config_errors_alarm;
-/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
- vxge_mBIT(3)
-/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
-/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
-/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
-/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
- u8 unused02108[0x02108-0x020a0];
-
-/*0x02108*/ u64 kdfcctl_status;
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
-/*0x02110*/ u64 rsthdlr_status;
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
-/*0x02118*/ u64 fifo0_status;
-#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02120*/ u64 fifo1_status;
-#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02128*/ u64 fifo2_status;
-#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
- u8 unused02158[0x02158-0x02130];
-
-/*0x02158*/ u64 tgt_illegal_access;
-#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
- u8 unused02200[0x02200-0x02160];
-
-/*0x02200*/ u64 vpath_general_cfg1;
-#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
-/*0x02208*/ u64 vpath_general_cfg2;
-#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
-/*0x02210*/ u64 vpath_general_cfg3;
-#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
- u8 unused02220[0x02220-0x02218];
-
-/*0x02220*/ u64 kdfcctl_cfg0;
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)
-
- u8 unused02268[0x02268-0x02228];
-
-/*0x02268*/ u64 stats_cfg;
-#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
-/*0x02270*/ u64 interrupt_cfg0;
-#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
- u8 unused02280[0x02280-0x02278];
-
-/*0x02280*/ u64 interrupt_cfg2;
-#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-/*0x02288*/ u64 one_shot_vect0_en;
-#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
-/*0x02290*/ u64 one_shot_vect1_en;
-#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
-/*0x02298*/ u64 one_shot_vect2_en;
-#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
-/*0x022a0*/ u64 one_shot_vect3_en;
-#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
- u8 unused022b0[0x022b0-0x022a8];
-
-/*0x022b0*/ u64 pci_config_access_cfg1;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
-/*0x022b8*/ u64 pci_config_access_cfg2;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
-/*0x022c0*/ u64 pci_config_access_status;
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused02300[0x02300-0x022c8];
-
-/*0x02300*/ u64 vpath_debug_stats0;
-#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02308*/ u64 vpath_debug_stats1;
-#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02310*/ u64 vpath_debug_stats2;
-#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
-/*0x02318*/ u64 vpath_debug_stats3;
-#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02320*/ u64 vpath_debug_stats4;
-#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02328*/ u64 vpath_debug_stats5;
-#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02330*/ u64 vpath_debug_stats6;
-#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02338*/ u64 vpath_genstats_count01;
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02340*/ u64 vpath_genstats_count23;
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02348*/ u64 vpath_genstats_count4;
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02350*/ u64 vpath_genstats_count5;
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused02648[0x02648-0x02358];
-} __packed;
-
-#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
-
-/* Capability lists */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link width. */
-#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
-
-#endif
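All of the register fields above are encoded with vxge_mBIT()/vxge_vBIT(), which number bits from the most significant end of a 64-bit register (bit 0 is the MSB). A minimal standalone sketch of that convention, assuming the macro definitions match the ones earlier in this header (they are not part of this hunk):

#include <stdint.h>
#include <stdio.h>

/* Bit 0 is the MSB of the 64-bit register (big-endian bit numbering). */
#define vxge_mBIT(loc)          (0x8000000000000000ULL >> (loc))
/* Place a 'sz'-bit field whose most significant bit sits at position 'loc'. */
#define vxge_vBIT(val, loc, sz) (((uint64_t)(val)) << (64 - (loc) - (sz)))

int main(void)
{
	/* e.g. VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val): 14-bit field at bit 2 */
	printf("max_frm_len: %016llx\n",
	       (unsigned long long)vxge_vBIT(1518, 2, 14));
	/* e.g. VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH: single bit 3 */
	printf("incl_ph:     %016llx\n", (unsigned long long)vxge_mBIT(3));
	return 0;
}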
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
deleted file mode 100644
index ee164970b267..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ /dev/null
@@ -1,2428 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/prefetch.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-/*
- * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Enable vpath interrupts. The function is to be executed last in the
- * vpath initialization sequence.
- *
- * See also: vxge_hw_vpath_intr_disable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- enum vxge_hw_status status = VXGE_HW_OK;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_status);
-
- readq(&vp_reg->vpath_general_int_status);
-
- /* Mask unwanted interrupts */
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- /* Unmask the individual interrupts */
-
- writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
- &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
-
- if (vpath->hldev->first_vp_id != vpath->vp_id)
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
- else
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_pio_mem_write32_upper(0,
- &vp_reg->vpath_general_int_mask);
-exit:
- return status;
-
-}
-
-/*
- * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Disable vpath interrupts. The function is to be executed first in the
- * vpath teardown sequence.
- *
- * See also: vxge_hw_vpath_intr_enable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_general_int_mask);
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
-
-exit:
- return status;
-}
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
- return;
-
- vp_reg = fifo->vp_reg;
- config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
-
- if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- fifo->tim_tti_cfg1_saved = val64;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
-}
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg1_saved;
-
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- ring->tim_rti_cfg1_saved = val64;
- writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-}
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
-{
- u64 val64 = fifo->tim_tti_cfg3_saved;
- u64 timer = (fifo->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
-
- writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- /* tti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg3_saved;
- u64 timer = (ring->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
-
- writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- /* rti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
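The two rtimer helpers above scale the driver's rtimer value by 1000/272 before programming VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL. A plausible reading (an assumption, not stated in this file) is that rtimer is kept in microseconds and one TIM tick is 272 ns; a standalone sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring '(rtimer * 1000) / 272' above. */
static uint64_t usec_to_tim_ticks(uint64_t usec)
{
	return (usec * 1000) / 272;	/* total ns / assumed ns-per-tick */
}

int main(void)
{
	printf("100 usec -> %llu ticks\n",
	       (unsigned long long)usec_to_tim_ticks(100));
	return 0;
}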
-
-/**
- * vxge_hw_channel_msix_mask - Mask MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function masks the msix interrupt for the given msix_id
- */
-void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->set_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function unmasks the msix interrupt for the given msix_id
- */
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_clear - Clear the MSIX one-shot vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function unmasks the msix interrupt for the given msix_id
- * if it is configured in MSIX one-shot mode
- */
-void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
-{
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
-}
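The three MSIX helpers above address one of four 64-bit mask registers per vector: register index msix_id % 4 and, within it, bit msix_id >> 2 in vxge_mBIT numbering, so consecutive vectors are striped across the four registers. A small sketch of the mapping:

#include <stdio.h>

int main(void)
{
	int msix_id;

	/* Same arithmetic as the set/clear_msix_mask_vect[] indexing above. */
	for (msix_id = 0; msix_id < 8; msix_id++)
		printf("vector %d -> reg[%d], vxge_mBIT(%d)\n",
		       msix_id, msix_id % 4, msix_id >> 2);
	return 0;
}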
-
-/**
- * vxge_hw_device_set_intr_type - Updates the configuration
- * with new interrupt type.
- * @hldev: HW device handle.
- * @intr_mode: New interrupt type
- */
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
-{
-
- if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (intr_mode != VXGE_HW_INTR_MODE_DEF))
- intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
-
- hldev->config.intr_mode = intr_mode;
- return intr_mode;
-}
-
-/**
- * vxge_hw_device_intr_enable - Enable interrupts.
- * @hldev: HW device handle.
- *
- * Enable Titan interrupts. The function is to be executed last in the
- * Titan initialization sequence.
- *
- * See also: vxge_hw_device_intr_disable()
- */
-void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
-{
- u32 i;
- u64 val64;
- u32 val32;
-
- vxge_hw_device_mask_all(hldev);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_enable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
- val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
-
- if (val64 != 0) {
- writeq(val64, &hldev->common_reg->tim_int_status0);
-
- writeq(~val64, &hldev->common_reg->tim_int_mask0);
- }
-
- val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
-
- if (val32 != 0) {
- __vxge_hw_pio_mem_write32_upper(val32,
- &hldev->common_reg->tim_int_status1);
-
- __vxge_hw_pio_mem_write32_upper(~val32,
- &hldev->common_reg->tim_int_mask1);
- }
- }
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- vxge_hw_device_unmask_all(hldev);
-}
-
-/**
- * vxge_hw_device_intr_disable - Disable Titan interrupts.
- * @hldev: HW device handle.
- *
- * Disable Titan interrupts.
- *
- * See also: vxge_hw_device_intr_enable()
- */
-void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
-{
- u32 i;
-
- vxge_hw_device_mask_all(hldev);
-
- /* mask all the tim interrupts */
- writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
- __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
- &hldev->common_reg->tim_int_mask1);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_disable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-}
-
-/**
- * vxge_hw_device_mask_all - Mask all device interrupts.
- * @hldev: HW device handle.
- *
- * Mask all device interrupts.
- *
- * See also: vxge_hw_device_unmask_all()
- */
-void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64;
-
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
- VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_unmask_all - Unmask all device interrupts.
- * @hldev: HW device handle.
- *
- * Unmask all device interrupts.
- *
- * See also: vxge_hw_device_mask_all()
- */
-void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64 = 0;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_flush_io - Flush io writes.
- * @hldev: HW device handle.
- *
- * The function performs a read operation to flush io writes.
- *
- * Returns: void
- */
-void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
-{
- readl(&hldev->common_reg->titan_general_int_status);
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
- enum vxge_hw_event type)
-{
- switch (type) {
- case VXGE_HW_EVENT_UNKNOWN:
- break;
- case VXGE_HW_EVENT_RESET_START:
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- goto out;
- case VXGE_HW_EVENT_ALARM_CLEARED:
- goto out;
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- case VXGE_HW_EVENT_CRITICAL_ERR:
- case VXGE_HW_EVENT_SERR:
- break;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- break;
- default:
- vxge_assert(0);
- goto out;
- }
-
- /* notify driver */
- if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(hldev,
- type, vp_id);
-out:
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the previous link state is already down, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_DOWN)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_DOWN;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_down)
- hldev->uld_callbacks->link_down(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for a programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the previous link state is already up, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_UP)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_UP;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_up)
- hldev->uld_callbacks->link_up(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
-{
- u64 val64;
- u64 alarm_status;
- u64 pic_status;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
- u64 mask64;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath == NULL) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out2;
- }
-
- hldev = vpath->hldev;
- vp_reg = vpath->vp_reg;
- alarm_status = readq(&vp_reg->vpath_general_int_status);
-
- if (alarm_status == VXGE_HW_ALL_FOXES) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
- alarm_event);
- goto out;
- }
-
- sw_stats = vpath->sw_stats;
-
- if (alarm_status & ~(
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
- sw_stats->error_stats.unknown_alarms++;
-
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out;
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
- val64 = readq(&vp_reg->xgmac_vp_int_status);
-
- if (val64 &
- VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
- val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
- ))) {
- sw_stats->error_stats.network_sustained_fault++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_down_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_DOWN, alarm_event);
- }
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
- ))) {
-
- sw_stats->error_stats.network_sustained_ok++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_up_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_UP, alarm_event);
- }
-
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
- if (skip_alarms)
- return VXGE_HW_OK;
- }
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
- pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
- val64 = readq(&vp_reg->general_errors_reg);
- mask64 = readq(&vp_reg->general_errors_mask);
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
- ~mask64) {
- sw_stats->error_stats.ini_serr_det++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_SERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
- ~mask64) {
- sw_stats->error_stats.dblgen_fifo0_overflow++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
- ~mask64)
- sw_stats->error_stats.statsb_pif_chain_error++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
- ~mask64)
- sw_stats->error_stats.statsb_drop_timeout++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
- ~mask64)
- sw_stats->error_stats.target_illegal_access++;
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
- val64 = readq(&vp_reg->kdfcctl_errors_reg);
- mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->kdfcctl_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
- val64 = readq(&vp_reg->wrdma_alarm_status);
-
- if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
- val64 = readq(&vp_reg->prc_alarm_reg);
- mask64 = readq(&vp_reg->prc_alarm_mask);
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
- ~mask64)
- sw_stats->error_stats.prc_ring_bumps++;
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
- ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
- & ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
- & ~mask64) {
- sw_stats->error_stats.prc_quanta_size_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
- }
-out:
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
- if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
- (alarm_event == VXGE_HW_EVENT_UNKNOWN))
- return VXGE_HW_OK;
-
- __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
- if (alarm_event == VXGE_HW_EVENT_SERR)
- return VXGE_HW_ERR_CRITICAL;
-
- return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
- VXGE_HW_ERR_SLOT_FREEZE :
- (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
- VXGE_HW_ERR_VPATH;
-}
-
-/**
- * vxge_hw_device_begin_irq - Begin IRQ processing.
- * @hldev: HW device handle.
- * @skip_alarms: Do not clear the alarms
- * @reason: "Reason" for the interrupt, the value of Titan's
- * general_int_status register.
- *
- * The function performs two actions: it first checks whether (on a shared
- * IRQ) the interrupt was raised by the device, and then it masks the device
- * interrupts.
- *
- * Note:
- * vxge_hw_device_begin_irq() does not flush MMIO writes through the
- * bridge. Therefore, two back-to-back interrupts are possible.
- *
- * Returns: 0, if the interrupt is not "ours" (note that in this case the
- * device remains enabled).
- * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
- * status.
- */
-enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
- u32 skip_alarms, u64 *reason)
-{
- u32 i;
- u64 val64;
- u64 adapter_status;
- u64 vpath_mask;
- enum vxge_hw_status ret = VXGE_HW_OK;
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- if (unlikely(!val64)) {
- /* not Titan interrupt */
- *reason = 0;
- ret = VXGE_HW_ERR_WRONG_IRQ;
- goto exit;
- }
-
- if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
-
- adapter_status = readq(&hldev->common_reg->adapter_status);
-
- if (adapter_status == VXGE_HW_ALL_FOXES) {
-
- __vxge_hw_device_handle_error(hldev,
- NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
- *reason = 0;
- ret = VXGE_HW_ERR_SLOT_FREEZE;
- goto exit;
- }
- }
-
- hldev->stats.sw_dev_info_stats.total_intr_cnt++;
-
- *reason = val64;
-
- vpath_mask = hldev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
-
- if (val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
- hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
-
- return VXGE_HW_OK;
- }
-
- hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
-
- if (unlikely(val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
-
- enum vxge_hw_status error_level = VXGE_HW_OK;
-
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- ret = __vxge_hw_vpath_alarm_process(
- &hldev->virtual_paths[i], skip_alarms);
-
- error_level = VXGE_HW_SET_LEVEL(ret, error_level);
-
- if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
- (ret == VXGE_HW_ERR_SLOT_FREEZE)))
- break;
- }
-
- ret = error_level;
- }
-exit:
- return ret;
-}
-
-/**
- * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
- * condition that has caused the Tx and Rx interrupt.
- * @hldev: HW device.
- *
- * Acknowledge (that is, clear) the condition that has caused
- * the Tx and Rx interrupt.
- * See also: vxge_hw_device_begin_irq(),
- * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
- */
-void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
-{
-
- if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status0);
- }
-
- if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status1);
- }
-}
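Taken together, vxge_hw_device_begin_irq() and vxge_hw_device_clear_tx_rx() give the shape of the driver's ISR: test whether the shared interrupt is ours, then acknowledge the Tx/Rx condition. A standalone sketch with the device stubbed out (the stubs only model the status-register handshake shown above, not real hardware or the real driver types):

#include <stdio.h>

/* Minimal stand-ins for the driver types above (illustration only). */
enum vxge_hw_status { VXGE_HW_OK, VXGE_HW_ERR_WRONG_IRQ };
struct fake_device { unsigned long long general_int_status; };

static enum vxge_hw_status begin_irq_stub(struct fake_device *dev,
					  unsigned long long *reason)
{
	*reason = dev->general_int_status;	/* read-to-check, as above */
	return *reason ? VXGE_HW_OK : VXGE_HW_ERR_WRONG_IRQ;
}

static void clear_tx_rx_stub(struct fake_device *dev)
{
	dev->general_int_status = 0;		/* ack the Tx/Rx condition */
}

int main(void)
{
	struct fake_device dev = { .general_int_status = 0x8ULL };
	unsigned long long reason;

	if (begin_irq_stub(&dev, &reason) != VXGE_HW_OK)
		return 0;			/* shared IRQ: not ours */
	clear_tx_rx_stub(&dev);
	printf("handled, reason=%llx\n", reason);
	return 0;
}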
-
-/*
- * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
- * @channel: Channel
- * @dtrh: Buffer to return the DTR pointer
- *
- * Allocates a dtr from the reserve array. If the reserve array is empty,
- * it swaps the reserve and free arrays.
- *
- */
-static enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
-{
- if (channel->reserve_ptr - channel->reserve_top > 0) {
-_alloc_after_swap:
- *dtrh = channel->reserve_arr[--channel->reserve_ptr];
-
- return VXGE_HW_OK;
- }
-
- /* switch between empty and full arrays */
-
- /* The idea behind this design is that keeping the free and reserve
- * arrays separate also separates the irq and non-irq parts, i.e. no
- * additional locking is needed when we free a resource. */
-
- if (channel->length - channel->free_ptr > 0) {
- swap(channel->reserve_arr, channel->free_arr);
- channel->reserve_ptr = channel->length;
- channel->reserve_top = channel->free_ptr;
- channel->free_ptr = channel->length;
-
- channel->stats->reserve_free_swaps_cnt++;
-
- goto _alloc_after_swap;
- }
-
- channel->stats->full_cnt++;
-
- *dtrh = NULL;
- return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
-}
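A toy, single-threaded model of the reserve/free two-array scheme implemented above (a sketch for illustration, not driver code): the allocator pops from reserve_arr, frees push into free_arr, and the arrays swap only when the reserve side runs dry, so the irq and non-irq paths never touch the same array:

#include <stdio.h>

struct toy_channel {
	void **reserve_arr, **free_arr;
	int reserve_ptr, reserve_top, free_ptr, length;
};

static void *toy_alloc(struct toy_channel *ch)
{
	if (ch->reserve_ptr - ch->reserve_top > 0)
		return ch->reserve_arr[--ch->reserve_ptr];

	if (ch->length - ch->free_ptr > 0) {	/* swap empty and full arrays */
		void **tmp = ch->reserve_arr;

		ch->reserve_arr = ch->free_arr;
		ch->free_arr = tmp;
		ch->reserve_ptr = ch->length;
		ch->reserve_top = ch->free_ptr;
		ch->free_ptr = ch->length;
		return ch->reserve_arr[--ch->reserve_ptr];
	}
	return NULL;				/* out of descriptors */
}

static void toy_free(struct toy_channel *ch, void *dtrh)
{
	ch->free_arr[--ch->free_ptr] = dtrh;
}

int main(void)
{
	int items[4] = { 0, 1, 2, 3 };
	void *a[4] = { &items[0], &items[1], &items[2], &items[3] };
	void *b[4];
	struct toy_channel ch = {
		.reserve_arr = a, .free_arr = b,
		.reserve_ptr = 4, .reserve_top = 0, .free_ptr = 4, .length = 4,
	};
	int i;
	void *d;

	for (i = 0; i < 4; i++) {	/* drain the reserve array ... */
		d = toy_alloc(&ch);
		toy_free(&ch, d);	/* ... recycling into free_arr */
	}
	/* The reserve side is now empty; this alloc triggers the swap. */
	d = toy_alloc(&ch);
	printf("got item %d after the swap\n", *(int *)d);
	return 0;
}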
-
-/*
- * vxge_hw_channel_dtr_post - Post a dtr to the channel
- * @channel: Channel
- * @dtrh: DTR pointer
- *
- * Posts a dtr to the work array.
- *
- */
-static void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
-{
- vxge_assert(channel->work_arr[channel->post_index] == NULL);
-
- channel->work_arr[channel->post_index++] = dtrh;
-
- /* wrap-around */
- if (channel->post_index == channel->length)
- channel->post_index = 0;
-}
-
-/*
- * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
- * @channel: Channel
- * @dtrh: Buffer to return the next completed DTR pointer
- *
- * Returns the next completed dtr without removing it from the work array
- *
- */
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
-{
- vxge_assert(channel->compl_index < channel->length);
-
- *dtrh = channel->work_arr[channel->compl_index];
- prefetch(*dtrh);
-}
-
-/*
- * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
- * @channel: Channel handle
- *
- * Removes the next completed dtr from the work array
- *
- */
-void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
-{
- channel->work_arr[channel->compl_index] = NULL;
-
- /* wrap-around */
- if (++channel->compl_index == channel->length)
- channel->compl_index = 0;
-
- channel->stats->total_compl_cnt++;
-}
-
-/*
- * vxge_hw_channel_dtr_free - Frees a dtr
- * @channel: Channel handle
- * @dtrh: DTR pointer
- *
- * Returns the dtr to the free array
- *
- */
-void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
-{
- channel->free_arr[--channel->free_ptr] = dtrh;
-}
-
-/*
- * vxge_hw_channel_dtr_count
- * @channel: Channel handle. Obtained via vxge_hw_channel_open().
- *
- * Retrieve the number of DTRs available. This function cannot be called
- * from the data path; ring_initial_replenishi() is the only user.
- */
-int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
-{
- return (channel->reserve_ptr - channel->reserve_top) +
- (channel->length - channel->free_ptr);
-}
-
-/**
- * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- *
- * Reserve an Rx descriptor for subsequent filling-in by the driver
- * and posting on the corresponding channel
- * via vxge_hw_ring_rxd_post().
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
- *
- */
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
- void **rxdh)
-{
- enum vxge_hw_status status;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, rxdh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_ring_rxd_1 *rxdp =
- (struct vxge_hw_ring_rxd_1 *)*rxdh;
-
- rxdp->control_0 = rxdp->control_1 = 0;
- }
-
- return status;
-}
-
-/**
- * vxge_hw_ring_rxd_free - Free descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor can again
- * be:
- *
- * - reserved (vxge_hw_ring_rxd_reserve);
- *
- * - posted (vxge_hw_ring_rxd_post);
- *
- * - completed (vxge_hw_ring_rxd_next_completed);
- *
- * - and recycled again (vxge_hw_ring_rxd_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_free(channel, rxdh);
-
-}
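The reserve/post/complete/free lifecycle spelled out in the comment above can be summarized as a tiny state machine; the sketch below only models the state transitions and prints the vxge call a driver would make at each step:

#include <stdio.h>

enum rxd_state { RXD_FREE, RXD_RESERVED, RXD_POSTED, RXD_COMPLETED };

static const char *next_step[] = {
	[RXD_FREE]      = "vxge_hw_ring_rxd_reserve()",
	[RXD_RESERVED]  = "fill buffers, then vxge_hw_ring_rxd_post()",
	[RXD_POSTED]    = "vxge_hw_ring_rxd_next_completed()",
	[RXD_COMPLETED] = "vxge_hw_ring_rxd_free(), then recycle",
};

int main(void)
{
	int s;

	for (s = RXD_FREE; s <= RXD_COMPLETED; s++)
		printf("state %d: %s\n", s, next_step[s]);
	return 0;
}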
-
-/**
- * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * This routine prepares an rxd and posts it.
- */
-void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_post_post - Process rxd after post.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post
- */
-void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
-
-/**
- * vxge_hw_ring_rxd_post - Post descriptor on the ring.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
- *
- * Post descriptor on the ring.
- * Prior to posting, the descriptor should be filled in accordance with
- * the Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- wmb();
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
-
-/**
- * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post with memory barrier.
- */
-void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
-{
- wmb();
- vxge_hw_ring_rxd_post_post(ring, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Receive Descriptor Format. Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
- * driver of new completed descriptors. After that
- * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
- * of the completions (the very first completion is passed by HW via
- * vxge_hw_ring_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_ring_rxd_next_completed either immediately from inside the
- * ring callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * Non-zero @t_code means failure to fill-in receive buffer(s)
- * of the descriptor.
- * For instance, parity error detected during the data transfer.
- * In this case Titan will complete the descriptor and indicate
- * for the host that the received data is not to be used.
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- * See also: vxge_hw_ring_callback_f{},
- * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
- */
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_ring_rxd_1 *rxdp;
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 control_0, own;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, rxdh);
-
- rxdp = *rxdh;
- if (rxdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- control_0 = rxdp->control_0;
- own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
-
- /* check whether it is not the end */
- if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
-
-		vxge_assert(rxdp->host_control != 0);
-
- ++ring->cmpl_cnt;
- vxge_hw_channel_dtr_complete(channel);
-
- vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
-
- ring->stats->common_stats.usage_cnt++;
- if (ring->stats->common_stats.usage_max <
- ring->stats->common_stats.usage_cnt)
- ring->stats->common_stats.usage_max =
- ring->stats->common_stats.usage_cnt;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
-	/* Reset it, since we don't want to return
-	 * garbage to the driver */
- *rxdh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
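-
-/*
- * Usage sketch (illustrative only): draining completions after the
- * ring callback fires. vxge_hw_ring_handle_tcode() screens the
- * transfer code before the buffer is consumed; process_rx_buffer()
- * is a hypothetical per-packet handler.
- *
- *	void *rxdh;
- *	u8 t_code;
- *
- *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
- *	       VXGE_HW_OK) {
- *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) ==
- *		    VXGE_HW_OK)
- *			process_rx_buffer(rxdh);
- *		vxge_hw_ring_rxd_free(ring, rxdh);
- *	}
- */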
-
-/**
- * vxge_hw_ring_handle_tcode - Handle transfer code.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when encounters critical error.
- */
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
-	/* If the t_code is not supported and if the
-	 * t_code is other than 0x5 (unparseable packet
-	 * such as an unknown IPv6 header), drop it
-	 */
-
- if (t_code == VXGE_HW_RING_T_CODE_OK ||
- t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
- status = VXGE_HW_OK;
- goto exit;
- }
-
- if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- ring->stats->rxd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
-
-/**
- * __vxge_hw_non_offload_db_post - Post non offload doorbell
- *
- * @fifo: fifo handle
- * @txdl_ptr: The starting location of the TxDL in host memory
- * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
- * @no_snoop: No snoop flags
- *
- * This function posts a non-offload doorbell to the doorbell FIFO.
- *
- */
-static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
- u64 txdl_ptr, u32 num_txds, u32 no_snoop)
-{
- writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
- VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
- VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
- &fifo->nofl_db->control_0);
-
- writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-}
-
-/**
- * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
- * the fifo
- * @fifoh: Handle to the fifo object used for non offload send
- */
-u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
-{
- return vxge_hw_channel_dtr_count(&fifoh->channel);
-}
-
-/**
- * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- * @txdl_priv: Buffer to return the pointer to per txdl space
- *
- * Reserve a single TxDL (that is, fifo descriptor)
- * for subsequent filling-in by the driver
- * and posting on the corresponding fifo channel
- * via vxge_hw_fifo_txdl_post().
- *
- * Note: it is the responsibility of the driver to reserve multiple descriptors
- * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
- * carries up to configured number (fifo.max_frags) of contiguous buffers.
- *
- * Returns: VXGE_HW_OK - success;
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifo,
- void **txdlh, void **txdl_priv)
-{
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status;
- int i;
-
- channel = &fifo->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, txdlh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)*txdlh;
- struct __vxge_hw_fifo_txdl_priv *priv;
-
- priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- /* reset the TxDL's private */
- priv->align_dma_offset = 0;
- priv->align_vaddr_start = priv->align_vaddr;
- priv->align_used_frags = 0;
- priv->frags = 0;
- priv->alloc_frags = fifo->config->max_frags;
- priv->next_txdl_priv = NULL;
-
- *txdl_priv = (void *)(size_t)txdp->host_control;
-
- for (i = 0; i < fifo->config->max_frags; i++) {
- txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
- txdp->control_0 = txdp->control_1 = 0;
- }
- }
-
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
- * descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @frag_idx: Index of the data buffer in the caller's scatter-gather list
- * (of buffers).
- * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
- * @size: Size of the data buffer (in bytes).
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
- * All three APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
- void *txdlh, u32 frag_idx,
- dma_addr_t dma_pointer, u32 size)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp, *txdp_last;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
-
- if (frag_idx != 0)
- txdp->control_0 = txdp->control_1 = 0;
- else {
- txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
- txdp->control_1 |= fifo->interrupt_type;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
- fifo->tx_intr_num);
- if (txdl_priv->frags) {
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
- (txdl_priv->frags - 1);
- txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- }
- }
-
- vxge_assert(frag_idx < txdl_priv->alloc_frags);
-
- txdp->buffer_pointer = (u64)dma_pointer;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
- fifo->stats->total_buffers++;
- txdl_priv->frags++;
-}
-
-/**
- * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
- *
- * Post descriptor on the 'fifo' type channel for transmission.
- * Prior to posting, the descriptor should be filled in accordance with the
- * Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp_last;
- struct vxge_hw_fifo_txd *txdp_first;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp_first = txdlh;
-
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
- txdp_last->control_0 |=
- VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
-
- __vxge_hw_non_offload_db_post(fifo,
- (u64)txdl_priv->dma_addr,
- txdl_priv->frags - 1,
- fifo->no_snoop_bits);
-
- fifo->stats->total_posts++;
- fifo->stats->common_stats.usage_cnt++;
- if (fifo->stats->common_stats.usage_max <
- fifo->stats->common_stats.usage_cnt)
- fifo->stats->common_stats.usage_max =
- fifo->stats->common_stats.usage_cnt;
-}
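-
-/*
- * Usage sketch (illustrative only): sending a two-fragment packet.
- * Reserve a TxDL, program one TxD per fragment, then post; the
- * doorbell write happens inside vxge_hw_fifo_txdl_post(). The
- * dma0/dma1/len0/len1 values stand for hypothetical DMA mappings of
- * the payload fragments.
- *
- *	void *txdlh, *txdl_priv;
- *
- *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) ==
- *	    VXGE_HW_OK) {
- *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma0, len0);
- *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 1, dma1, len1);
- *		vxge_hw_fifo_txdl_post(fifo, txdlh);
- *	}
- */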
-
-/**
- * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Transmit Descriptor Format.
- * Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify
- * the driver of new completed descriptors. After that
- * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
- * remaining completions (the very first completion is passed by HW via
- * vxge_hw_channel_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_fifo_txdl_next_completed either immediately from inside the
- * channel callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * Non-zero @t_code means failure to process the descriptor.
- * The failure could happen, for instance, when the link is
- * down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifo, void **txdlh,
- enum vxge_hw_fifo_tcode *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_fifo_txd *txdp;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, txdlh);
-
- txdp = *txdlh;
- if (txdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- /* check whether host owns it */
- if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
-
- vxge_assert(txdp->host_control != 0);
-
- vxge_hw_channel_dtr_complete(channel);
-
- *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
-
- if (fifo->stats->common_stats.usage_cnt > 0)
- fifo->stats->common_stats.usage_cnt--;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
- /* no more completions */
- *txdlh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
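-
-/*
- * Usage sketch (illustrative only): reclaiming transmitted TxDLs.
- * unmap_and_free_skb() stands for whatever per-descriptor cleanup the
- * caller performs and is hypothetical.
- *
- *	void *txdlh;
- *	enum vxge_hw_fifo_tcode t_code;
- *
- *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
- *	       VXGE_HW_OK) {
- *		vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
- *		unmap_and_free_skb(txdlh);
- *		vxge_hw_fifo_txdl_free(fifo, txdlh);
- *	}
- */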
-
-/**
- * vxge_hw_fifo_handle_tcode - Handle transfer code.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when encounters critical error.
- */
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
-	/* (t_code & 0x7) is always non-negative, so only check the upper bound */
-	if ((t_code & 0x7) > 0x4) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- fifo->stats->txd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_free - Free descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor can
- * again be:
- *
- * - reserved (vxge_hw_fifo_txdl_reserve);
- *
- * - posted (vxge_hw_fifo_txdl_post);
- *
- * - completed (vxge_hw_fifo_txdl_next_completed);
- *
- * - and recycled again (vxge_hw_fifo_txdl_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_free(channel, txdlh);
-}
-
-/**
- * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath to the MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be added for this vpath into the list
- * @macaddr_mask: MAC address mask for macaddr
- * @duplicate_mode: Duplicate MAC address add mode. Please see
- * enum vxge_hw_vpath_mac_addr_add_mode{}
- *
- * Adds the given mac address and mac address mask into the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- switch (duplicate_mode) {
- case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
- i = 0;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
- i = 1;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
- i = 2;
- break;
- default:
- i = 0;
- break;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
-exit:
- return status;
-}
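-
-/*
- * Worked example (illustrative only): the byte-packing loop above
- * folds the MAC address into the low 48 bits of a u64, most
- * significant byte first. For macaddr = 00:11:22:33:44:55 with an
- * all-ones mask the loop produces
- *
- *	data1 = 0x0000001122334455ULL;
- *	data2 = 0x0000FFFFFFFFFFFFULL;
- *
- * which the DA steering macros then shift into register position.
- */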
-
-/**
- * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
- * @vp: Vpath handle.
- * @macaddr: First MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the first mac address entry for this vpath from the MAC address table.
- * Return: the first mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
- * @vp: Vpath handle.
- * @macaddr: Next MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the next mac address entry for this vpath from the MAC address table.
- * Return: the next mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-
-exit:
- return status;
-}
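-
-/*
- * Usage sketch (illustrative only): walking the whole DA table with
- * the get/get_next pair above. print_entry() is hypothetical.
- *
- *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
- *	enum vxge_hw_status status;
- *
- *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
- *	while (status == VXGE_HW_OK) {
- *		print_entry(mac, mask);
- *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
- *	}
- */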
-
-/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath from the MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be deleted for this vpath from the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Deletes the given mac address and mac address mask from the list for
- * this vpath.
- * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath to the vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_delete
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
- * from the vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be deleted for this vpath from the list
- *
- * Deletes the given vlan id from the list for this vpath.
- * see also: vxge_hw_vpath_vid_add
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Enable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_disable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- /* Enable promiscuous mode for function 0 only */
- if (!(vpath->hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
- return VXGE_HW_OK;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
-
- val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_BCAST_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Disable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_enable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_bcast_enable - Enable broadcast
- * @vp: Vpath handle.
- *
- * Enable receiving broadcasts.
- */
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
- * @vp: Vpath handle.
- *
- * Enable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK on success.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
- * @vp: Vpath handle.
- *
- * Disable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vp,
- u32 skip_alarms)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
- * alarms
- * @vp: Virtual Path handle.
- * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
- * interrupts (can be repeated). If the fifo or ring is not enabled,
- * the corresponding MSIX vector should be set to 0
- * @alarm_msix_id: MSIX vector for alarm.
- *
- * This API associates the given MSIX vector numbers with the four TIM
- * interrupts and the alarm interrupt.
- */
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
- int alarm_msix_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 vp_id = vp->vpath->vp_id;
-
- val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[0]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[1]);
-
- writeq(val64, &vp_reg->interrupt_cfg0);
-
- writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
- &vp_reg->interrupt_cfg2);
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
- 0, 32), &vp_reg->one_shot_vect0_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
- 0, 32), &vp_reg->one_shot_vect1_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
- 0, 32), &vp_reg->one_shot_vect2_en);
- }
-}
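-
-/*
- * Worked example (illustrative only): with vp_id = 2 and
- * tim_msix_id = {0, 1}, the TX TIM interrupt maps to MSIX vector
- * 2 * 4 + 0 = 8 and the RX TIM interrupt to 2 * 4 + 1 = 9, i.e. each
- * vpath owns a contiguous group of four vectors.
- */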
-
-/**
- * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function masks the msix interrupt for the given msix_id.
- */
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function clears the msix interrupt for the given msix_id.
- */
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
- else
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function unmasks the msix interrupt for the given msix_id.
- */
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
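-
-/*
- * Usage sketch (illustrative only): a per-vector handler typically
- * masks its vector on entry, does its work, and then clears the
- * vector, which re-arms it in both one-shot and regular MSIX modes
- * (see vxge_hw_vpath_msix_clear() above). poll_ring() and msix_id
- * are hypothetical.
- *
- *	static irqreturn_t example_msix_handler(int irq, void *dev_id)
- *	{
- *		struct __vxge_hw_vpath_handle *vp = dev_id;
- *
- *		vxge_hw_vpath_msix_mask(vp, msix_id);
- *		poll_ring(vp);
- *		vxge_hw_vpath_msix_clear(vp, msix_id);
- *		return IRQ_HANDLED;
- *	}
- */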
-
-/**
- * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Mask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_unmask_tx_rx()
- */
-void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask0);
- }
-
- val64 = readl(&hldev->common_reg->tim_int_mask1);
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
-/**
- * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Unmask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_mask_tx_rx()
- */
-void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask0);
- }
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
-/**
- * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
- * descriptors and process the same.
- * @ring: Handle to the ring object used for receive
- *
- * The function polls the Rx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling is completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- *
- * See also: vxge_hw_vpath_poll_tx()
- */
-enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
-{
- u8 t_code;
- enum vxge_hw_status status = VXGE_HW_OK;
- void *first_rxdh;
- int new_count = 0;
-
- ring->cmpl_cnt = 0;
-
- status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
- if (status == VXGE_HW_OK)
- ring->callback(ring, first_rxdh,
- t_code, ring->channel.userdata);
-
- if (ring->cmpl_cnt != 0) {
- ring->doorbell_cnt += ring->cmpl_cnt;
- if (ring->doorbell_cnt >= ring->rxds_limit) {
- /*
- * Each RxD is of 4 qwords, update the number of
- * qwords replenished
- */
- new_count = (ring->doorbell_cnt * 4);
-
- /* For each block add 4 more qwords */
- ring->total_db_cnt += ring->doorbell_cnt;
- if (ring->total_db_cnt >= ring->rxds_per_block) {
- new_count += 4;
- /* Reset total count */
- ring->total_db_cnt %= ring->rxds_per_block;
- }
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
- &ring->vp_reg->prc_rxd_doorbell);
- readl(&ring->common_reg->titan_general_int_status);
- ring->doorbell_cnt = 0;
- }
- }
-
- return status;
-}
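-
-/*
- * Worked example (illustrative only): each RxD occupies 4 qwords, so
- * replenishing e.g. 16 descriptors reports 16 * 4 = 64 qwords to the
- * doorbell; once a whole block's worth of RxDs (rxds_per_block) has
- * been recycled, 4 extra qwords are added to cover the per-block
- * overhead, as the code above shows.
- */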
-
-/**
- * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
- * @fifo: Handle to the fifo object used for non offload send
- * @skb_ptr: pointer to skb
- * @nr_skb: number of skbs
- * @more: more is coming
- *
- * The function polls the Tx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling is completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- */
-enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
- struct sk_buff ***skb_ptr, int nr_skb,
- int *more)
-{
- enum vxge_hw_fifo_tcode t_code;
- void *first_txdlh;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- status = vxge_hw_fifo_txdl_next_completed(fifo,
- &first_txdlh, &t_code);
- if (status == VXGE_HW_OK)
- if (fifo->callback(fifo, first_txdlh, t_code,
- channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
- status = VXGE_HW_COMPLETIONS_REMAIN;
-
- return status;
-}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
deleted file mode 100644
index ba6f833bb059..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ /dev/null
@@ -1,2290 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_TRAFFIC_H
-#define VXGE_TRAFFIC_H
-
-#include "vxge-reg.h"
-#include "vxge-version.h"
-
-#define VXGE_HW_DTR_MAX_T_CODE 16
-#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_MAX_VIRTUAL_PATHS 17
-
-#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
-
-#define VXGE_HW_DEFAULT_32 0xffffffff
-/* frames sizes */
-#define VXGE_HW_HEADER_802_2_SIZE 3
-#define VXGE_HW_HEADER_SNAP_SIZE 5
-#define VXGE_HW_HEADER_VLAN_SIZE 4
-#define VXGE_HW_MAC_HEADER_MAX_SIZE \
- (ETH_HLEN + \
- VXGE_HW_HEADER_802_2_SIZE + \
- VXGE_HW_HEADER_VLAN_SIZE + \
- VXGE_HW_HEADER_SNAP_SIZE)
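-/* With ETH_HLEN = 14 this evaluates to 14 + 3 + 4 + 5 = 26 bytes. */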
-
-/* 32bit alignments */
-#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
-#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
-#define VXGE_HW_HEADER_802_2_ALIGN 3
-#define VXGE_HW_HEADER_SNAP_ALIGN 1
-
-#define VXGE_HW_L3_CKSUM_OK 0xFFFF
-#define VXGE_HW_L4_CKSUM_OK 0xFFFF
-
-/* Forward declarations */
-struct __vxge_hw_device;
-struct __vxge_hw_vpath_handle;
-struct vxge_hw_vp_config;
-struct __vxge_hw_virtualpath;
-struct __vxge_hw_channel;
-struct __vxge_hw_fifo;
-struct __vxge_hw_ring;
-struct vxge_hw_ring_attr;
-struct vxge_hw_mempool;
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/*VXGE_HW_STATUS_H*/
-
-#define VXGE_HW_EVENT_BASE 0
-#define VXGE_LL_EVENT_BASE 100
-
-/**
- * enum vxge_hw_event- Enumerates slow-path HW events.
- * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
- * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
- * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
- * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
- * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
- * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
- * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
- * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
- * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
- * slot-freeze from the rest of the critical events (e.g. ECC) when it is
- * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
- *
- * enum vxge_hw_event enumerates slow-path HW events.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_uld_link_down_f{}.
- */
-enum vxge_hw_event {
- VXGE_HW_EVENT_UNKNOWN = 0,
- /* HW events */
- VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
- VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
- VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
- VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
- VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
- VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
- VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
- VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
- VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
- VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
- VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
- VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
- VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
- VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
-};
-
-#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
-
-/*
- * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
- caller.
- */
-struct vxge_hw_mempool_dma {
- dma_addr_t addr;
- struct pci_dev *handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * vxge_hw_mempool_item_f - Mempool item alloc/free callback
- * @mempoolh: Memory pool handle.
- * @memblock: Address of memory block
- * @memblock_index: Index of memory block
- * @item: Item that gets allocated or freed.
- * @index: Item's index in the memory pool.
- * @is_last: True if this item is the last one in the pool; false otherwise.
- * @userdata: Per-pool user context.
- *
- * Memory pool allocation/deallocation callback.
- */
-
-/*
- * struct vxge_hw_mempool - Memory pool.
- */
-struct vxge_hw_mempool {
-
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-
- void *userdata;
- void **memblocks_arr;
- void **memblocks_priv_arr;
- struct vxge_hw_mempool_dma *memblocks_dma_arr;
- struct __vxge_hw_device *devh;
- u32 memblock_size;
- u32 memblocks_max;
- u32 memblocks_allocated;
- u32 item_size;
- u32 items_max;
- u32 items_initial;
- u32 items_current;
- u32 items_per_memblock;
- void **items_arr;
- u32 items_priv_size;
-};
-
-#define VXGE_HW_MAX_INTR_PER_VP 4
-#define VXGE_HW_VPATH_INTR_TX 0
-#define VXGE_HW_VPATH_INTR_RX 1
-#define VXGE_HW_VPATH_INTR_EINTA 2
-#define VXGE_HW_VPATH_INTR_BMAP 3
-
-#define VXGE_HW_BLOCK_SIZE 4096
-
-/**
- * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
- * @intr_enable: Set to 1, if interrupt is enabled.
- * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
- * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
- * asserted, other interrupt-generating entities will cancel the
- * scheduled timer interrupt.
- * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
- * When asserted, an interrupt will be generated every time the
- * boundary timer expires, even if no traffic has been transmitted
- * on this interrupt.
- * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
- * (Re-) Interrupt Enable: When asserted, an interrupt will be
- * generated the next time the timer expires, even if no traffic has
- * been transmitted on this interrupt. (This will only happen once
- * each time that this value is written to the TIM.) This bit is
- * cleared by H/W at the end of the current-timer-interval when
- * the interrupt is triggered.
- * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
- * @util_sel: Utilization Selector. Selects which of the workload approximations
- * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
- * specified utilization etc.), selects one of
- * the 17 host configured values.
- * 0-Virtual Path 0
- * 1-Virtual Path 1
- * ...
- * 16-Virtual Path 16
- * 17-Legacy Tx network utilization, provided by TPA
- * 18-Legacy Rx network utilization, provided by FAU
- * 19-Average of legacy Rx and Tx utilization calculated from link
- * utilization values.
- * 20-31-Invalid configurations
- * 32-Host utilization for Virtual Path 0
- * 33-Host utilization for Virtual Path 1
- * ...
- * 48-Host utilization for Virtual Path 16
- * 49-Legacy Tx network utilization, provided by TPA
- * 50-Legacy Rx network utilization, provided by FAU
- * 51-Average of legacy Rx and Tx utilization calculated from
- * link utilization values.
- * 52-63-Invalid configurations
- * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
- * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
- * to 1 enables counting of TxD0 returns (signalled by PCC's),
- * towards utilization event count values.
- * @urange_a: Defines the upper limit (in percent) for this utilization range
- * to be active. This range is considered active
- * if 0 <= UTIL <= URNG_A
- * and the UEC_A field (below) is non-zero.
- * @uec_a: Utilization Event Count A. If this range is active, the adapter will
- * wait until UEC_A events have occurred on the interrupt before
- * generating an interrupt.
- * @urange_b: Link utilization range B.
- * @uec_b: Utilization Event Count B.
- * @urange_c: Link utilization range C.
- * @uec_c: Utilization Event Count C.
- * @urange_d: Link utilization range D.
- * @uec_d: Utilization Event Count D.
- * Traffic Interrupt Controller Module interrupt configuration.
- */
-struct vxge_hw_tim_intr_config {
-
- u32 intr_enable;
-#define VXGE_HW_TIM_INTR_ENABLE 1
-#define VXGE_HW_TIM_INTR_DISABLE 0
-#define VXGE_HW_TIM_INTR_DEFAULT 0
-
- u32 btimer_val;
-#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
-#define VXGE_HW_USE_FLASH_DEFAULT (~0)
-
- u32 timer_ac_en;
-#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
-#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
-
- u32 timer_ci_en;
-#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
-
- u32 timer_ri_en;
-#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
-
- u32 rtimer_val;
-#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
-
- u32 util_sel;
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
-#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
-
- u32 ltimer_val;
-#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
-
- /* Line utilization interrupts */
- u32 urange_a;
-#define VXGE_HW_MIN_TIM_URANGE_A 0
-#define VXGE_HW_MAX_TIM_URANGE_A 100
-
- u32 uec_a;
-#define VXGE_HW_MIN_TIM_UEC_A 0
-#define VXGE_HW_MAX_TIM_UEC_A 65535
-
- u32 urange_b;
-#define VXGE_HW_MIN_TIM_URANGE_B 0
-#define VXGE_HW_MAX_TIM_URANGE_B 100
-
- u32 uec_b;
-#define VXGE_HW_MIN_TIM_UEC_B 0
-#define VXGE_HW_MAX_TIM_UEC_B 65535
-
- u32 urange_c;
-#define VXGE_HW_MIN_TIM_URANGE_C 0
-#define VXGE_HW_MAX_TIM_URANGE_C 100
-
- u32 uec_c;
-#define VXGE_HW_MIN_TIM_UEC_C 0
-#define VXGE_HW_MAX_TIM_UEC_C 65535
-
-	u32 urange_d;
-#define VXGE_HW_MIN_TIM_URANGE_D			0
-#define VXGE_HW_MAX_TIM_URANGE_D			100
-
-	u32 uec_d;
-#define VXGE_HW_MIN_TIM_UEC_D 0
-#define VXGE_HW_MAX_TIM_UEC_D 65535
-};
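-
-/*
- * Example (illustrative only, all values made up): a configuration
- * that fires after 16 events while utilization stays in range A
- * (0..URNG_A percent), with a ~4 us boundary timer (BTIMER_VAL is in
- * 272 ns units, and 4 us / 272 ns is roughly 15).
- *
- *	struct vxge_hw_tim_intr_config tim = {
- *		.intr_enable	= VXGE_HW_TIM_INTR_ENABLE,
- *		.btimer_val	= 15,
- *		.urange_a	= 10,
- *		.uec_a		= 16,
- *	};
- */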
-
-#define VXGE_HW_STATS_OP_READ 0
-#define VXGE_HW_STATS_OP_CLEAR_STAT 1
-#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
-
-#define VXGE_HW_STATS_LOC_AGGR 17
-#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
-
-#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
-#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
- vxge_bVALn(bits, 32, 32)
-
-/**
- * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
- *
- * @tx_frms: Count of data frames transmitted on this Aggregator on all
- * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * However, does include frames discarded by the Distribution
- * function.
- * @tx_data_octets: Count of data and padding octets of frames transmitted
- * on this Aggregator on all its Aggregation ports. Does not include
- * octets of LACPDUs or Marker PDUs. However, does include octets of
- * frames discarded by the Distribution function.
- * @tx_mcast_frms: Count of data frames transmitted (to a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
- * on all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
- * that are discarded by the Distribution function. This occurs when
- * conversations are allocated to different ports and have to be
- * flushed on old ports
- * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
- * experience transmission errors on its Aggregation ports.
- * @rx_frms: Count of data frames received on this Aggregator on all its
- * Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * Also, does not include frames discarded by the Collection
- * function.
- * @rx_data_octets: Count of data and padding octets of frames received on this
- * Aggregator on all its Aggregation ports. Does not include octets
- * of LACPDUs or Marker PDUs. Also, does not include
- * octets of frames
- * discarded by the Collection function.
- * @rx_mcast_frms: Count of data frames received (from a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_discarded_frms: Count of data frames received on this Aggregator that are
- * discarded by the Collection function because the Collection
- * function was disabled on the port on which the frames were received.
- * @rx_errored_frms: Count of data frames received on this Aggregator that are
- * discarded by its Aggregation ports, or are discarded by the
- * Collection function of the Aggregator, or that are discarded by
- * the Aggregator due to detection of an illegal Slow Protocols PDU.
- * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
- * that are discarded by its Aggregation ports due to detection of
- * an unknown Slow Protocols PDU.
- *
- * Per aggregator XMAC RX statistics.
- */
-struct vxge_hw_xmac_aggr_stats {
-/*0x000*/ u64 tx_frms;
-/*0x008*/ u64 tx_data_octets;
-/*0x010*/ u64 tx_mcast_frms;
-/*0x018*/ u64 tx_bcast_frms;
-/*0x020*/ u64 tx_discarded_frms;
-/*0x028*/ u64 tx_errored_frms;
-/*0x030*/ u64 rx_frms;
-/*0x038*/ u64 rx_data_octets;
-/*0x040*/ u64 rx_mcast_frms;
-/*0x048*/ u64 rx_bcast_frms;
-/*0x050*/ u64 rx_discarded_frms;
-/*0x058*/ u64 rx_errored_frms;
-/*0x060*/ u64 rx_unknown_slow_proto_frms;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
- *
- * @tx_ttl_frms: Count of successfully transmitted MAC frames
- * @tx_ttl_octets: Count of total octets of transmitted frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of transmitted frames, including framing characters,
- * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have
- * 8 bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
- * including the preamble octets.
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
- * due to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- * is still transmitted.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown
- * protocol, such as a new IPv6 extension header, or an unsupported
- * Routing Type. The packet still has a checksum calculated but it
- * may be incorrect.
- * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
- * Since the only control frames supported by this device are
- * PAUSE frames, this register is a count of all transmitted MAC
- * control frames.
- * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
- * on this Aggregation port.
- * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
- * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
- * the network. Increments because of:
- * 1) An internal processing error
- * (such as an uncorrectable ECC error). 2) A frame parsing error
- * during IP checksum calculation.
- * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
- * Aggregation port.
- * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /T/ (i.e. the terminate character), thus the statistic
- * tracks the number of transmitted Terminate characters.
- * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /S/ (i.e. the start character),
- * thus the statistic tracks
- * the number of transmitted Start characters.
- * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns transmitted at
- * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
- * set to 1, then this stat increments when COLUMN2 is found within
- * 'n' clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
- * to 0, then it means to search anywhere for COLUMN2).
- * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /I/ (i.e. a column containing all idle characters),
- * thus the statistic tracks the number of transmitted Idle columns.
- * @tx_any_err_frms: Count of transmitted frames containing any error that
- * prevents them from being passed to the network. Increments if
- * there is an ECC while reading the frame out of the transmit
- * buffer. Also increments if the transmit protocol assist (TPA)
- * block determines that the frame should not be sent.
- * @tx_drop_frms: Count of frames that could not be sent for no other reason
- * than internal MAC processing. Increments once whenever the
- * transmit buffer is flushed (due to an ECC error on a memory
- * descriptor).
- * @rx_ttl_frms: Count of total received MAC frames, including frames received
- * with frame-too-long, FCS, or length errors. This stat can be
- * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
- * everything, even "frames" as small one byte of preamble.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of received frames, including framing characters,
- * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have 8
- * bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
- * even the preamble octets of "frames" as small one byte of preamble
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing
- * characters, of offloaded received frames that are passed
- * to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
- * the broadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to
- * the system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
- * is set to 1, then "more than 1518 octets" becomes "more than 1518
- * (1522 if VLAN-tagged) octets".
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS. In other
- * words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers. Note: If register
- * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
- * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
- * octets".
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 65 and 127
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 128 and 255
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 256 and 511
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding
- * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets.
- * Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
- * errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_pause_count: Count of number of pause quanta that the MAC has been in
- * the paused state. Recall, one pause quantum equates to 512
- * bit times.
- * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
- * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
- * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
- * this register is a count of all received MAC control frames.
- * Note: This stat may be configured to count all layer 2 errors
- * (i.e. length errors and FCS errors).
- * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
- * not include frames received with frame-too-long or
- * frame-too-short error.
- * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
- * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
- * for VLAN-tagged frames), inclusive, that does not match the
- * number of data octets (including pad) received. Also contains
- * a count of received frames with a length/type field less than
- * 46 (42 for VLAN-tagged frames) where the number of data octets
- * (including pad) received is greater than 46 (42 for VLAN-tagged
- * frames).
- * @rx_out_rng_len_err_frms: Count of received frames with length/type field
- * between 1501 and 1535 decimal, inclusive.
- * @rx_drop_frms: Count of received frames that could not be passed to the host.
- * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
- * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
- * for a list of reasons. Because the RMAC drops one frame at a time,
- * this stat also indicates the number of drop events.
- * @rx_discarded_frms: Count of received frames containing
- * any error that prevents
- * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
- * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
- * reasons.
- * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
- * port.
- * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
- * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain an unknown
- * PDU. Or frames that contain the Slow Protocols group MAC address,
- * but do not carry the Slow Protocols EtherType.
- * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
- * this Aggregation port.
- * @rx_fcs_discard: Count of received frames that are discarded because the
- * FCS check failed.
- * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain a badly
- * formed PDU. Or frames that carry the Slow Protocols EtherType,
- * but contain an illegal value of Protocol Subtype.
- * @rx_switch_discard: Count of received frames that are discarded by the
- * internal switch because they did not have an entry in the
- * Filtering Database. This includes frames that had an invalid
- * destination MAC address or VLAN ID. It also includes frames that
- * are discarded because they did not satisfy the length requirements
- * of the target VPATH.
- * @rx_len_discard: Count of received frames that are discarded because of an
- * invalid frame length (includes fragments, oversized frames and
- * mismatch between frame length and length/type field). This stat
- * can be configured
- * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
- * @rx_rpa_discard: Count of received frames that were discarded because the
- * receive protocol assist (RPA) discovered an error in the frame
- * or was unable to parse the frame.
- * @rx_l2_mgmt_discard: Count of Layer 2 management frames (eg. pause frames,
- * Link Aggregation Control Protocol (LACP) frames, etc.) that are
- * discarded.
- * @rx_rts_discard: Count of received frames that are discarded by the receive
- * traffic steering (RTS) logic. Includes frames discarded
- * because the SSC response contradicted the switch table, because
- * the SSC timed out, or because the target queue could not fit the
- * frame.
- * @rx_trash_discard: Count of received frames that are discarded because
- * receive traffic steering (RTS) steered the frame to the trash
- * queue.
- * @rx_buff_full_discard: Count of received frames that are discarded because
- * internal buffers are full. Includes frames discarded because the
- * RTS logic is waiting for an SSC lookup that has no timeout bound.
- * Also, includes frames that are dropped because the MAC2FAU buffer
- * is nearly full -- this can happen if the external receive buffer
- * is full and the receive path is backing up.
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
- * characters occurring between times of normal data transmission
- * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
- * incremented when either:
- * 1) The Reconciliation Sublayer (RS) is expecting one control
- * character and gets another (i.e. is expecting a Start
- * character, but gets another control character).
- * 2) The Start control character is not in lane 0.
- * Only increments the count by one for each XGMII column.
- * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
- * during normal data transmission. If the Reconciliation Sublayer
- * (RS) receives a control character, other than a terminate control
- * character, during receipt of data octets then this register is
- * incremented. Also increments if the start frame delimiter is not
- * found in the correct location. Only increments the count by one
- * for each XGMII column.
- * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
- * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
- * Only includes symbol errors that are observed between the XGMII
- * Start Frame Delimiter and End Frame Delimiter, inclusive. And
- * only increments the count by one for each frame.
- * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time.
- * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_local_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a local fault.
- * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
- * to 1, then this stat increments when COLUMN2 is found within 'n'
- * clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
- * 0, then it means to search anywhere for COLUMN2).
- * @rx_jettison: Count of received frames that are jettisoned because internal
- * buffers are full.
- * @rx_remote_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a remote fault.
- *
- * XMAC Port Statistics.
- */
-struct vxge_hw_xmac_port_stats {
-/*0x000*/ u64 tx_ttl_frms;
-/*0x008*/ u64 tx_ttl_octets;
-/*0x010*/ u64 tx_data_octets;
-/*0x018*/ u64 tx_mcast_frms;
-/*0x020*/ u64 tx_bcast_frms;
-/*0x028*/ u64 tx_ucast_frms;
-/*0x030*/ u64 tx_tagged_frms;
-/*0x038*/ u64 tx_vld_ip;
-/*0x040*/ u64 tx_vld_ip_octets;
-/*0x048*/ u64 tx_icmp;
-/*0x050*/ u64 tx_tcp;
-/*0x058*/ u64 tx_rst_tcp;
-/*0x060*/ u64 tx_udp;
-/*0x068*/ u32 tx_parse_error;
-/*0x06c*/ u32 tx_unknown_protocol;
-/*0x070*/ u64 tx_pause_ctrl_frms;
-/*0x078*/ u32 tx_marker_pdu_frms;
-/*0x07c*/ u32 tx_lacpdu_frms;
-/*0x080*/ u32 tx_drop_ip;
-/*0x084*/ u32 tx_marker_resp_pdu_frms;
-/*0x088*/ u32 tx_xgmii_char2_match;
-/*0x08c*/ u32 tx_xgmii_char1_match;
-/*0x090*/ u32 tx_xgmii_column2_match;
-/*0x094*/ u32 tx_xgmii_column1_match;
-/*0x098*/ u32 unused1;
-/*0x09c*/ u16 tx_any_err_frms;
-/*0x09e*/ u16 tx_drop_frms;
-/*0x0a0*/ u64 rx_ttl_frms;
-/*0x0a8*/ u64 rx_vld_frms;
-/*0x0b0*/ u64 rx_offload_frms;
-/*0x0b8*/ u64 rx_ttl_octets;
-/*0x0c0*/ u64 rx_data_octets;
-/*0x0c8*/ u64 rx_offload_octets;
-/*0x0d0*/ u64 rx_vld_mcast_frms;
-/*0x0d8*/ u64 rx_vld_bcast_frms;
-/*0x0e0*/ u64 rx_accepted_ucast_frms;
-/*0x0e8*/ u64 rx_accepted_nucast_frms;
-/*0x0f0*/ u64 rx_tagged_frms;
-/*0x0f8*/ u64 rx_long_frms;
-/*0x100*/ u64 rx_usized_frms;
-/*0x108*/ u64 rx_osized_frms;
-/*0x110*/ u64 rx_frag_frms;
-/*0x118*/ u64 rx_jabber_frms;
-/*0x120*/ u64 rx_ttl_64_frms;
-/*0x128*/ u64 rx_ttl_65_127_frms;
-/*0x130*/ u64 rx_ttl_128_255_frms;
-/*0x138*/ u64 rx_ttl_256_511_frms;
-/*0x140*/ u64 rx_ttl_512_1023_frms;
-/*0x148*/ u64 rx_ttl_1024_1518_frms;
-/*0x150*/ u64 rx_ttl_1519_4095_frms;
-/*0x158*/ u64 rx_ttl_4096_8191_frms;
-/*0x160*/ u64 rx_ttl_8192_max_frms;
-/*0x168*/ u64 rx_ttl_gt_max_frms;
-/*0x170*/ u64 rx_ip;
-/*0x178*/ u64 rx_accepted_ip;
-/*0x180*/ u64 rx_ip_octets;
-/*0x188*/ u64 rx_err_ip;
-/*0x190*/ u64 rx_icmp;
-/*0x198*/ u64 rx_tcp;
-/*0x1a0*/ u64 rx_udp;
-/*0x1a8*/ u64 rx_err_tcp;
-/*0x1b0*/ u64 rx_pause_count;
-/*0x1b8*/ u64 rx_pause_ctrl_frms;
-/*0x1c0*/ u64 rx_unsup_ctrl_frms;
-/*0x1c8*/ u64 rx_fcs_err_frms;
-/*0x1d0*/ u64 rx_in_rng_len_err_frms;
-/*0x1d8*/ u64 rx_out_rng_len_err_frms;
-/*0x1e0*/ u64 rx_drop_frms;
-/*0x1e8*/ u64 rx_discarded_frms;
-/*0x1f0*/ u64 rx_drop_ip;
-/*0x1f8*/ u64 rx_drop_udp;
-/*0x200*/ u32 rx_marker_pdu_frms;
-/*0x204*/ u32 rx_lacpdu_frms;
-/*0x208*/ u32 rx_unknown_pdu_frms;
-/*0x20c*/ u32 rx_marker_resp_pdu_frms;
-/*0x210*/ u32 rx_fcs_discard;
-/*0x214*/ u32 rx_illegal_pdu_frms;
-/*0x218*/ u32 rx_switch_discard;
-/*0x21c*/ u32 rx_len_discard;
-/*0x220*/ u32 rx_rpa_discard;
-/*0x224*/ u32 rx_l2_mgmt_discard;
-/*0x228*/ u32 rx_rts_discard;
-/*0x22c*/ u32 rx_trash_discard;
-/*0x230*/ u32 rx_buff_full_discard;
-/*0x234*/ u32 rx_red_discard;
-/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
-/*0x23c*/ u32 rx_xgmii_data_err_cnt;
-/*0x240*/ u32 rx_xgmii_char1_match;
-/*0x244*/ u32 rx_xgmii_err_sym;
-/*0x248*/ u32 rx_xgmii_column1_match;
-/*0x24c*/ u32 rx_xgmii_char2_match;
-/*0x250*/ u32 rx_local_fault;
-/*0x254*/ u32 rx_xgmii_column2_match;
-/*0x258*/ u32 rx_jettison;
-/*0x25c*/ u32 rx_remote_fault;
-} __packed;
-
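The kernel-doc above gives the preamble arithmetic for @rx_ttl_octets in
prose; as a quick illustration, here is a minimal sketch (assuming the
default TTL_FRMS_HANDLING accounting; the helper name is hypothetical,
not part of the driver):

/* Hypothetical helper: total RX octets including framing, assuming the
 * default configuration where each counted frame carries 8 preamble
 * octets that rx_ttl_octets excludes. */
static inline u64
total_rx_octets_with_framing(const struct vxge_hw_xmac_port_stats *s)
{
	return s->rx_ttl_octets + s->rx_ttl_frms * 8ULL;
}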
-/**
- * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
- *
- * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
- * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
- * not including framing characters (i.e. less framing bits).
- * To determine the total octets of transmitted frames, including
- * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
- * this stat (the device always prepends 8 bytes of preamble for
- * each frame).
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
- * to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
- * such as a new IPv6 extension header, or an unsupported Routing
- * Type. The packet still has a checksum calculated but it may be
- * incorrect.
- * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
- * to the network. Increments because of: 1) An internal processing
- * error (such as an uncorrectable ECC error). 2) A frame parsing
- * error during IP checksum calculation.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- * is still transmitted.
- * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted TCP segments. Does not include segments containing
- * retransmitted octets.
- * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
- * total number of segments retransmitted. Retransmitted segments
- * that are sourced by the host are counted by the host.
- * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted IP datagrams that could not be passed to the
- * network.
- *
- * XMAC Vpath TX Statistics.
- */
-struct vxge_hw_xmac_vpath_tx_stats {
- u64 tx_ttl_eth_frms;
- u64 tx_ttl_eth_octets;
- u64 tx_data_octets;
- u64 tx_mcast_frms;
- u64 tx_bcast_frms;
- u64 tx_ucast_frms;
- u64 tx_tagged_frms;
- u64 tx_vld_ip;
- u64 tx_vld_ip_octets;
- u64 tx_icmp;
- u64 tx_tcp;
- u64 tx_rst_tcp;
- u64 tx_udp;
- u32 tx_unknown_protocol;
- u32 tx_lost_ip;
- u32 unused1;
- u32 tx_parse_error;
- u64 tx_tcp_offload;
- u64 tx_retx_tcp_offload;
- u64 tx_lost_ip_offload;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
- *
- * @rx_ttl_eth_frms: Count of successfully received MAC frames.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_eth_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). Only counts octets
- * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
- * before FCS. To determine the total octets of received frames,
- * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
- * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
- * that have the required 8 bytes of preamble).
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing characters,
- * of offloaded received frames that are passed to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
- * broadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to the
- * system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed.
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS.
- * In other words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers.
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames
- * with length (including
- * FCS, but not framing bits) of between 65 and 127 octets inclusive.
- * Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 128 and 255 octets
- * inclusive. Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 256 and 511 octets
- * inclusive. Includes frames received with frame-too-long, FCS, or
- * length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
- * (+22 bytes if VLAN-tagged) octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of number of octets in received IP datagrams.
- * Includes errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_lost_frms: Count of received frames that could not be passed to the host.
- * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
- * for a list of reasons.
- * @rx_lost_ip: Count of received IP datagrams that could not be passed to
- * the host. See RX_LOST_FRMS for a list of reasons.
- * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of received IP datagrams that could not be passed to the host.
- * See RX_LOST_FRMS for a list of reasons.
- * @rx_various_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_sleep_discard: Count of received frames that are discarded because the
- * target VPATH is asleep (a Wake-on-LAN magic packet can be used
- * to awaken the VPATH).
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_queue_full_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
- *
- * XMAC Vpath RX Statistics.
- */
-struct vxge_hw_xmac_vpath_rx_stats {
- u64 rx_ttl_eth_frms;
- u64 rx_vld_frms;
- u64 rx_offload_frms;
- u64 rx_ttl_eth_octets;
- u64 rx_data_octets;
- u64 rx_offload_octets;
- u64 rx_vld_mcast_frms;
- u64 rx_vld_bcast_frms;
- u64 rx_accepted_ucast_frms;
- u64 rx_accepted_nucast_frms;
- u64 rx_tagged_frms;
- u64 rx_long_frms;
- u64 rx_usized_frms;
- u64 rx_osized_frms;
- u64 rx_frag_frms;
- u64 rx_jabber_frms;
- u64 rx_ttl_64_frms;
- u64 rx_ttl_65_127_frms;
- u64 rx_ttl_128_255_frms;
- u64 rx_ttl_256_511_frms;
- u64 rx_ttl_512_1023_frms;
- u64 rx_ttl_1024_1518_frms;
- u64 rx_ttl_1519_4095_frms;
- u64 rx_ttl_4096_8191_frms;
- u64 rx_ttl_8192_max_frms;
- u64 rx_ttl_gt_max_frms;
- u64 rx_ip;
- u64 rx_accepted_ip;
- u64 rx_ip_octets;
- u64 rx_err_ip;
- u64 rx_icmp;
- u64 rx_tcp;
- u64 rx_udp;
- u64 rx_err_tcp;
- u64 rx_lost_frms;
- u64 rx_lost_ip;
- u64 rx_lost_ip_offload;
- u16 rx_various_discard;
- u16 rx_sleep_discard;
- u16 rx_red_discard;
- u16 rx_queue_full_discard;
- u64 rx_mpa_ok_frms;
-} __packed;
-
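The rx_ttl_*_frms members above form a frame-size histogram in which
every bucket also counts errored frames; summing the buckets therefore
yields the total received frame count across all sizes, as in this
sketch (helper name hypothetical):

/* Hypothetical helper: total frames across all RX size buckets. */
static u64 vpath_rx_ttl_frms(const struct vxge_hw_xmac_vpath_rx_stats *s)
{
	return s->rx_ttl_64_frms + s->rx_ttl_65_127_frms +
	       s->rx_ttl_128_255_frms + s->rx_ttl_256_511_frms +
	       s->rx_ttl_512_1023_frms + s->rx_ttl_1024_1518_frms +
	       s->rx_ttl_1519_4095_frms + s->rx_ttl_4096_8191_frms +
	       s->rx_ttl_8192_max_frms + s->rx_ttl_gt_max_frms;
}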
-/**
- * struct vxge_hw_xmac_stats - XMAC Statistics
- *
- * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
- * @port_stats: Statistics on ports (wire 0, wire 1, lag)
- * @vpath_tx_stats: Per vpath XMAC TX stats
- * @vpath_rx_stats: Per vpath XMAC RX stats
- *
- * XMAC Statistics.
- */
-struct vxge_hw_xmac_stats {
- struct vxge_hw_xmac_aggr_stats
- aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
- struct vxge_hw_xmac_port_stats
- port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
- struct vxge_hw_xmac_vpath_tx_stats
- vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_xmac_vpath_rx_stats
- vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
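Because struct vxge_hw_xmac_stats keeps per-port and per-vpath arrays, a
device-wide figure has to be folded by the caller; a minimal sketch
(hypothetical helper, not part of the driver):

/* Hypothetical helper: sum valid RX frames over all virtual paths. */
static u64 xmac_total_rx_vld_frms(const struct vxge_hw_xmac_stats *xs)
{
	u64 total = 0;
	u32 i;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		total += xs->vpath_rx_stats[i].rx_vld_frms;
	return total;
}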
-/**
- * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
- * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
- * for the given VPATH
- * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
- * @ini_num_cpl_rcvd: The number of PCI read completions received by the
- * PIC block
- * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
- * block to the host
- * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
- * the PIC block
- * @wrcrdtarb_xoff: TBD
- * @rdcrdtarb_xoff: TBD
- * @vpath_genstats_count0: TBD
- * @vpath_genstats_count1: TBD
- * @vpath_genstats_count2: TBD
- * @vpath_genstats_count3: TBD
- * @vpath_genstats_count4: TBD
- * @vpath_genstats_count5: TBD
- * @tx_stats: Transmit stats
- * @rx_stats: Receive stats
- * @prog_event_vnum1: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
- * @prog_event_vnum0: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
- * @prog_event_vnum3: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
- * @prog_event_vnum2: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
- * @rx_multi_cast_frame_discard: TBD
- * @rx_frm_transferred: TBD
- * @rxd_returned: TBD
- * @rx_mpa_len_fail_frms: Count of received frames
- * that fail the MPA length check
- * @rx_mpa_mrk_fail_frms: Count of received frames
- * that fail the MPA marker check
- * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
- * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
- * frame buffer (and subsequently to the host).
- * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
- * because the VPATH is in reset
- * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
- * whenever the received frame matches the VPATH's Wake-on-LAN
- * signature(s) CRC.
- * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
- * because the VPATH is in reset. Includes frames that are discarded
- * because the current VPIN does not match the VPIN of the frame.
- *
- * Titan vpath hardware statistics.
- */
-struct vxge_hw_vpath_stats_hw_info {
-/*0x000*/ u32 ini_num_mwr_sent;
-/*0x004*/ u32 unused1;
-/*0x008*/ u32 ini_num_mrd_sent;
-/*0x00c*/ u32 unused2;
-/*0x010*/ u32 ini_num_cpl_rcvd;
-/*0x014*/ u32 unused3;
-/*0x018*/ u64 ini_num_mwr_byte_sent;
-/*0x020*/ u64 ini_num_cpl_byte_rcvd;
-/*0x028*/ u32 wrcrdtarb_xoff;
-/*0x02c*/ u32 unused4;
-/*0x030*/ u32 rdcrdtarb_xoff;
-/*0x034*/ u32 unused5;
-/*0x038*/ u32 vpath_genstats_count0;
-/*0x03c*/ u32 vpath_genstats_count1;
-/*0x040*/ u32 vpath_genstats_count2;
-/*0x044*/ u32 vpath_genstats_count3;
-/*0x048*/ u32 vpath_genstats_count4;
-/*0x04c*/ u32 unused6;
-/*0x050*/ u32 vpath_genstats_count5;
-/*0x054*/ u32 unused7;
-/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
-/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
-/*0x220*/ u64 unused9;
-/*0x228*/ u32 prog_event_vnum1;
-/*0x22c*/ u32 prog_event_vnum0;
-/*0x230*/ u32 prog_event_vnum3;
-/*0x234*/ u32 prog_event_vnum2;
-/*0x238*/ u16 rx_multi_cast_frame_discard;
-/*0x23a*/ u8 unused10[6];
-/*0x240*/ u32 rx_frm_transferred;
-/*0x244*/ u32 unused11;
-/*0x248*/ u16 rxd_returned;
-/*0x24a*/ u8 unused12[6];
-/*0x252*/ u16 rx_mpa_len_fail_frms;
-/*0x254*/ u16 rx_mpa_mrk_fail_frms;
-/*0x256*/ u16 rx_mpa_crc_fail_frms;
-/*0x258*/ u16 rx_permitted_frms;
-/*0x25c*/ u64 rx_vp_reset_discarded_frms;
-/*0x25e*/ u64 rx_wol_frms;
-/*0x260*/ u64 tx_vp_reset_discarded_frms;
-} __packed;
-
-/**
- * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
- * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
- * by the adapter that were discarded because the VPATH is out of service
- * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
- * adapter that were discarded because the VPATH is out of service
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
- * the adapter that were discarded because the VPATH instance number does
- * not match
- * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
- * by the adapter that were discarded because the VPATH instance number
- * does not match
- * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
- * to the GENSTATS0_CFG for information on configuring this statistic
- * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
- * to the GENSTATS1_CFG for information on configuring this statistic
- * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
- * to the GENSTATS2_CFG for information on configuring this statistic
- * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
- * to the GENSTATS3_CFG for information on configuring this statistic
- * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
- * to the GENSTATS4_CFG for information on configuring this statistic
- * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
- * to the GENSTATS5_CFG for information on configuring this statistic
- * @pci.rstdrop_cpl 0x01c8 4
- * @pci.rstdrop_msg 0x01cc 4
- * @pci.rstdrop_client1 0x01d0 4
- * @pci.rstdrop_client0 0x01d4 4
- * @pci.rstdrop_client2 0x01d8 4
- * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
- * header credits were depleted
- * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
- * header credits were depleted
- * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
- * header credits were depleted
- * @pci.depl_cplh[vplane1] 0x01ea 2
- * @pci.depl_nph[vplane1] 0x01ec 2
- * @pci.depl_ph[vplane1] 0x01ee 2
- * @pci.depl_cplh[vplane2] 0x01f2 2
- * @pci.depl_nph[vplane2] 0x01f4 2
- * @pci.depl_ph[vplane2] 0x01f6 2
- * @pci.depl_cplh[vplane3] 0x01fa 2
- * @pci.depl_nph[vplane3] 0x01fc 2
- * @pci.depl_ph[vplane3] 0x01fe 2
- * @pci.depl_cplh[vplane4] 0x0202 2
- * @pci.depl_nph[vplane4] 0x0204 2
- * @pci.depl_ph[vplane4] 0x0206 2
- * @pci.depl_cplh[vplane5] 0x020a 2
- * @pci.depl_nph[vplane5] 0x020c 2
- * @pci.depl_ph[vplane5] 0x020e 2
- * @pci.depl_cplh[vplane6] 0x0212 2
- * @pci.depl_nph[vplane6] 0x0214 2
- * @pci.depl_ph[vplane6] 0x0216 2
- * @pci.depl_cplh[vplane7] 0x021a 2
- * @pci.depl_nph[vplane7] 0x021c 2
- * @pci.depl_ph[vplane7] 0x021e 2
- * @pci.depl_cplh[vplane8] 0x0222 2
- * @pci.depl_nph[vplane8] 0x0224 2
- * @pci.depl_ph[vplane8] 0x0226 2
- * @pci.depl_cplh[vplane9] 0x022a 2
- * @pci.depl_nph[vplane9] 0x022c 2
- * @pci.depl_ph[vplane9] 0x022e 2
- * @pci.depl_cplh[vplane10] 0x0232 2
- * @pci.depl_nph[vplane10] 0x0234 2
- * @pci.depl_ph[vplane10] 0x0236 2
- * @pci.depl_cplh[vplane11] 0x023a 2
- * @pci.depl_nph[vplane11] 0x023c 2
- * @pci.depl_ph[vplane11] 0x023e 2
- * @pci.depl_cplh[vplane12] 0x0242 2
- * @pci.depl_nph[vplane12] 0x0244 2
- * @pci.depl_ph[vplane12] 0x0246 2
- * @pci.depl_cplh[vplane13] 0x024a 2
- * @pci.depl_nph[vplane13] 0x024c 2
- * @pci.depl_ph[vplane13] 0x024e 2
- * @pci.depl_cplh[vplane14] 0x0252 2
- * @pci.depl_nph[vplane14] 0x0254 2
- * @pci.depl_ph[vplane14] 0x0256 2
- * @pci.depl_cplh[vplane15] 0x025a 2
- * @pci.depl_nph[vplane15] 0x025c 2
- * @pci.depl_ph[vplane15] 0x025e 2
- * @pci.depl_cplh[vplane16] 0x0262 2
- * @pci.depl_nph[vplane16] 0x0264 2
- * @pci.depl_ph[vplane16] 0x0266 2
- * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
- * credits were depleted
- * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
- * credits were depleted
- * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
- * credits were depleted
- * @pci.depl_cpld[vplane1] 0x0272 2
- * @pci.depl_npd[vplane1] 0x0274 2
- * @pci.depl_pd[vplane1] 0x0276 2
- * @pci.depl_cpld[vplane2] 0x027a 2
- * @pci.depl_npd[vplane2] 0x027c 2
- * @pci.depl_pd[vplane2] 0x027e 2
- * @pci.depl_cpld[vplane3] 0x0282 2
- * @pci.depl_npd[vplane3] 0x0284 2
- * @pci.depl_pd[vplane3] 0x0286 2
- * @pci.depl_cpld[vplane4] 0x028a 2
- * @pci.depl_npd[vplane4] 0x028c 2
- * @pci.depl_pd[vplane4] 0x028e 2
- * @pci.depl_cpld[vplane5] 0x0292 2
- * @pci.depl_npd[vplane5] 0x0294 2
- * @pci.depl_pd[vplane5] 0x0296 2
- * @pci.depl_cpld[vplane6] 0x029a 2
- * @pci.depl_npd[vplane6] 0x029c 2
- * @pci.depl_pd[vplane6] 0x029e 2
- * @pci.depl_cpld[vplane7] 0x02a2 2
- * @pci.depl_npd[vplane7] 0x02a4 2
- * @pci.depl_pd[vplane7] 0x02a6 2
- * @pci.depl_cpld[vplane8] 0x02aa 2
- * @pci.depl_npd[vplane8] 0x02ac 2
- * @pci.depl_pd[vplane8] 0x02ae 2
- * @pci.depl_cpld[vplane9] 0x02b2 2
- * @pci.depl_npd[vplane9] 0x02b4 2
- * @pci.depl_pd[vplane9] 0x02b6 2
- * @pci.depl_cpld[vplane10] 0x02ba 2
- * @pci.depl_npd[vplane10] 0x02bc 2
- * @pci.depl_pd[vplane10] 0x02be 2
- * @pci.depl_cpld[vplane11] 0x02c2 2
- * @pci.depl_npd[vplane11] 0x02c4 2
- * @pci.depl_pd[vplane11] 0x02c6 2
- * @pci.depl_cpld[vplane12] 0x02ca 2
- * @pci.depl_npd[vplane12] 0x02cc 2
- * @pci.depl_pd[vplane12] 0x02ce 2
- * @pci.depl_cpld[vplane13] 0x02d2 2
- * @pci.depl_npd[vplane13] 0x02d4 2
- * @pci.depl_pd[vplane13] 0x02d6 2
- * @pci.depl_cpld[vplane14] 0x02da 2
- * @pci.depl_npd[vplane14] 0x02dc 2
- * @pci.depl_pd[vplane14] 0x02de 2
- * @pci.depl_cpld[vplane15] 0x02e2 2
- * @pci.depl_npd[vplane15] 0x02e4 2
- * @pci.depl_pd[vplane15] 0x02e6 2
- * @pci.depl_cpld[vplane16] 0x02ea 2
- * @pci.depl_npd[vplane16] 0x02ec 2
- * @pci.depl_pd[vplane16] 0x02ee 2
- * @xgmac_port[3];
- * @xgmac_aggr[2];
- * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
- * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
- * @xgmac.orp_lro_events 0x0af8 8
- * @xgmac.orp_bs_events 0x0b00 8
- * @xgmac.orp_iwarp_events 0x0b08 8
- * @xgmac.tx_permitted_frms 0x0b14 4
- * @xgmac.port2_tx_any_frms 0x0b1d 1
- * @xgmac.port1_tx_any_frms 0x0b1e 1
- * @xgmac.port0_tx_any_frms 0x0b1f 1
- * @xgmac.port2_rx_any_frms 0x0b25 1
- * @xgmac.port1_rx_any_frms 0x0b26 1
- * @xgmac.port0_rx_any_frms 0x0b27 1
- *
- * Titan mrpcim hardware statistics.
- */
-struct vxge_hw_device_stats_mrpcim_info {
-/*0x0000*/ u32 pic_ini_rd_drop;
-/*0x0004*/ u32 pic_ini_wr_drop;
-/*0x0008*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
- /*0x0004*/ u32 unused1;
- } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
-/*0x0090*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
- /*0x0004*/ u32 unused2;
- } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
-/*0x0118*/ struct {
- /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
- /*0x0004*/ u32 unused3;
- } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
-/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
-/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
-/*0x01a8*/ u32 pic_genstats_count0;
-/*0x01ac*/ u32 pic_genstats_count1;
-/*0x01b0*/ u32 pic_genstats_count2;
-/*0x01b4*/ u32 pic_genstats_count3;
-/*0x01b8*/ u32 pic_genstats_count4;
-/*0x01bc*/ u32 unused4;
-/*0x01c0*/ u32 pic_genstats_count5;
-/*0x01c4*/ u32 unused5;
-/*0x01c8*/ u32 pci_rstdrop_cpl;
-/*0x01cc*/ u32 pci_rstdrop_msg;
-/*0x01d0*/ u32 pci_rstdrop_client1;
-/*0x01d4*/ u32 pci_rstdrop_client0;
-/*0x01d8*/ u32 pci_rstdrop_client2;
-/*0x01dc*/ u32 unused6;
-/*0x01e0*/ struct {
- /*0x0000*/ u16 unused7;
- /*0x0002*/ u16 pci_depl_cplh;
- /*0x0004*/ u16 pci_depl_nph;
- /*0x0006*/ u16 pci_depl_ph;
- } pci_depl_h_vplane[17];
-/*0x0268*/ struct {
- /*0x0000*/ u16 unused8;
- /*0x0002*/ u16 pci_depl_cpld;
- /*0x0004*/ u16 pci_depl_npd;
- /*0x0006*/ u16 pci_depl_pd;
- } pci_depl_d_vplane[17];
-/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
-/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
-/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
-/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
-/*0x0af0*/ u64 unused7;
-/*0x0af8*/ u64 unused8;
-/*0x0b00*/ u64 unused9;
-/*0x0b08*/ u64 unused10;
-/*0x0b10*/ u32 unused11;
-/*0x0b14*/ u32 xgmac_tx_permitted_frms;
-/*0x0b18*/ u32 unused12;
-/*0x0b1c*/ u8 unused13;
-/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
-/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
-/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
-/*0x0b20*/ u32 unused14;
-/*0x0b24*/ u8 unused15;
-/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
-/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
-/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
-} __packed;
-
-/**
- * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
- * @vpath_info: VPath statistics
- * @vpath_info_sav: Vpath statistics saved
- *
- * Titan hardware statistics.
- */
-struct vxge_hw_device_stats_hw_info {
- struct vxge_hw_vpath_stats_hw_info
- *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_vpath_stats_hw_info
- vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_common_info - HW common
- * statistics for queues.
- * @full_cnt: Number of times the queue was full
- * @usage_cnt: Current usage count.
- * @usage_max: Maximum usage.
- * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
- * @total_compl_cnt: Total descriptor completion count.
- *
- * Hw queue counters
- * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_common_info {
- u32 full_cnt;
- u32 usage_cnt;
- u32 usage_max;
- u32 reserve_free_swaps_cnt;
- u32 total_compl_cnt;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
- * @common_stats: Common counters for all queues
- * @total_posts: Total number of postings on the queue.
- * @total_buffers: Total number of buffers posted.
- * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
- * (index) in this array reflects the transfer code type, for instance
- * 0xA - "loss of link".
- * Value txd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW fifo counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_fifo_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 total_posts;
- u32 total_buffers;
- u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-};
-
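Since @txd_t_code_err_cnt is indexed by the transfer code itself, a
per-code report is a direct loop over the array; a sketch (assuming
kernel context for pr_info; the function name is hypothetical):

/* Hypothetical helper: dump non-zero TX transfer-code counters. */
static void dump_txd_tcodes(const struct vxge_hw_vpath_stats_sw_fifo_info *f)
{
	u32 t;

	for (t = 0; t < VXGE_HW_DTR_MAX_T_CODE; t++)
		if (f->txd_t_code_err_cnt[t])
			pr_info("t-code 0x%x hit %u times\n",
				t, f->txd_t_code_err_cnt[t]);
}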
-/**
- * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
- * @common_stats: Common counters for all queues
- * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
- * (index) in this array reflects the transfer code type,
- * for instance
- * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
- * Value rxd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW ring counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_fifo_info{},
- */
-struct vxge_hw_vpath_stats_sw_ring_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
- * @unknown_alarms:
- * @network_sustained_fault:
- * @network_sustained_ok:
- * @kdfcctl_fifo0_overwrite:
- * @kdfcctl_fifo0_poison:
- * @kdfcctl_fifo0_dma_error:
- * @dblgen_fifo0_overflow:
- * @statsb_pif_chain_error:
- * @statsb_drop_timeout:
- * @target_illegal_access:
- * @ini_serr_det:
- * @prc_ring_bumps:
- * @prc_rxdcm_sc_err:
- * @prc_rxdcm_sc_abort:
- * @prc_quanta_size_err:
- *
- * HW vpath error statistics
- */
-struct vxge_hw_vpath_stats_sw_err {
- u32 unknown_alarms;
- u32 network_sustained_fault;
- u32 network_sustained_ok;
- u32 kdfcctl_fifo0_overwrite;
- u32 kdfcctl_fifo0_poison;
- u32 kdfcctl_fifo0_dma_error;
- u32 dblgen_fifo0_overflow;
- u32 statsb_pif_chain_error;
- u32 statsb_drop_timeout;
- u32 target_illegal_access;
- u32 ini_serr_det;
- u32 prc_ring_bumps;
- u32 prc_rxdcm_sc_err;
- u32 prc_rxdcm_sc_abort;
- u32 prc_quanta_size_err;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
- * @soft_reset_cnt: Number of times soft reset is done on this vpath.
- * @error_stats: error counters for the vpath
- * @ring_stats: counters for ring belonging to the vpath
- * @fifo_stats: counters for fifo belonging to the vpath
- *
- * HW vpath sw statistics
- * See also: struct vxge_hw_device_info{}.
- */
-struct vxge_hw_vpath_stats_sw_info {
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_err error_stats;
- struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
- struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
-};
-
-/**
- * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
- *
- * @not_traffic_intr_cnt: Number of times the host was interrupted
- * without new completions.
- * "Non-traffic interrupt counter".
- * @traffic_intr_cnt: Number of traffic interrupts for the device.
- * @total_intr_cnt: Total number of traffic interrupts for the device.
- * @total_intr_cnt == @traffic_intr_cnt +
- * @not_traffic_intr_cnt
- * @soft_reset_cnt: Number of times soft reset is done on this device.
- * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
- * HW per-device statistics.
- */
-struct vxge_hw_device_stats_sw_info {
- u32 not_traffic_intr_cnt;
- u32 traffic_intr_cnt;
- u32 total_intr_cnt;
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_info
- vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * struct vxge_hw_device_stats_sw_err - HW device error statistics.
- * @vpath_alarms: Number of vpath alarms
- *
- * HW Device error stats
- */
-struct vxge_hw_device_stats_sw_err {
- u32 vpath_alarms;
-};
-
-/**
- * struct vxge_hw_device_stats - Contains HW per-device statistics,
- * including hardware-maintained statistics.
- * @devh: HW device handle.
- * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
- * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
- * space.
- * @hw_info_dma_acch: One more DMA handle used subsequently to free the
- * DMA object. Note that this and the previous handle have
- * physical meaning for Solaris; on Windows and Linux the
- * corresponding value is simply a pointer to the PCI device.
- *
- * @hw_dev_info_stats: Titan statistics maintained by the hardware.
- * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
- * of completions per interrupt.
- * @sw_dev_err_stats: HW's "soft" device error statistics.
- *
- * Structure-container of HW per-device statistics. Note that per-channel
- * statistics are kept in separate structures under HW's fifo and ring
- * channels.
- */
-struct vxge_hw_device_stats {
- /* handles */
- struct __vxge_hw_device *devh;
-
- /* HW device hardware statistics */
- struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
-
- /* HW device "soft" stats */
- struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
- struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
-};
-
-enum vxge_hw_status vxge_hw_device_hw_stats_enable(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_hw_info *hw_stats);
-
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_sw_info *sw_stats);
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(
- struct __vxge_hw_device *devh,
- u32 operation,
- u32 location,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
- struct vxge_hw_xmac_stats *xmac_stats);
-
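Going by the declarations above, the usual order is to enable hardware
statistics once and then snapshot them; a condensed usage sketch
(assuming VXGE_HW_OK is the success code of enum vxge_hw_status and
devh is an already-initialized device handle):

/* Sketch: enable HW stats collection, then snapshot the XMAC counters. */
static enum vxge_hw_status poll_xmac_stats(struct __vxge_hw_device *devh,
					   struct vxge_hw_xmac_stats *xmac)
{
	enum vxge_hw_status status;

	status = vxge_hw_device_hw_stats_enable(devh);
	if (status != VXGE_HW_OK)
		return status;

	return vxge_hw_device_xmac_stats_get(devh, xmac);
}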
-/**
- * enum vxge_hw_mgmt_reg_type - Register types.
- *
- * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
- * @vxge_hw_mgmt_reg_type_toc: TOC Registers
- * @vxge_hw_mgmt_reg_type_common: Common Registers
- * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
- * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
- * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
- * @vxge_hw_mgmt_reg_type_vpath: vpath registers
- *
- * Register type enumeration.
- */
-enum vxge_hw_mgmt_reg_type {
- vxge_hw_mgmt_reg_type_legacy = 0,
- vxge_hw_mgmt_reg_type_toc = 1,
- vxge_hw_mgmt_reg_type_common = 2,
- vxge_hw_mgmt_reg_type_mrpcim = 3,
- vxge_hw_mgmt_reg_type_srpcim = 4,
- vxge_hw_mgmt_reg_type_vpmgmt = 5,
- vxge_hw_mgmt_reg_type_vpath = 6
-};
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 *value);
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 value);
-
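The register type selects the register space while @index and @offset
locate the word within it; for example, reading a vpath register could
look like this sketch (the 0x0 offset is purely illustrative):

/* Sketch: read one 64-bit register from the vpath register space. */
static enum vxge_hw_status read_vpath_reg(struct __vxge_hw_device *devh,
					  u32 vpath_index, u64 *value)
{
	return vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_vpath,
				     vpath_index, 0x0, value);
}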
-/**
- * enum vxge_hw_rxd_state - Descriptor (RXD) state.
- * @VXGE_HW_RXD_STATE_NONE: Invalid state.
- * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_rxd_state {
- VXGE_HW_RXD_STATE_NONE = 0,
- VXGE_HW_RXD_STATE_AVAIL = 1,
- VXGE_HW_RXD_STATE_POSTED = 2,
- VXGE_HW_RXD_STATE_FREED = 3
-};
-
-/**
- * struct vxge_hw_ring_rxd_info - Extended information associated with a
- * completed ring descriptor.
- * @syn_flag: SYN flag
- * @is_icmp: Is ICMP
- * @fast_path_eligible: Fast Path Eligible flag
- * @l3_cksum_valid: Set if the L3 checksum is valid.
- * @l3_cksum: Result of IP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L3_CKSUM_OK would mean that
- * the checksum is correct, otherwise - the datagram is
- * corrupted.
- * @l4_cksum_valid: Set if the L4 checksum is valid.
- * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L4_CKSUM_OK would mean that
- * the checksum is correct. Otherwise - the packet is
- * corrupted.
- * @frame: Zero or more of enum vxge_hw_frame_type flags.
- * See enum vxge_hw_frame_type{}.
- * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
- * various higher-layer protocols, including (but not restricted to)
- * TCP and UDP. See enum vxge_hw_frame_proto{}.
- * @is_vlan: Set if the VLAN tag is valid.
- * @vlan: VLAN tag extracted from the received frame.
- * @rth_bucket: RTH bucket
- * @rth_it_hit: Set if the RTH hash value calculated by the Titan
- * hardware has a matching entry in the Indirection table.
- * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan
- * hardware has a matching entry in the Socket Pair Direct Match table.
- * @rth_hash_type: RTH hash code of the function used to calculate the hash.
- * @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Titan
- * hardware if RTH is enabled.
- */
-struct vxge_hw_ring_rxd_info {
- u32 syn_flag;
- u32 is_icmp;
- u32 fast_path_eligible;
- u32 l3_cksum_valid;
- u32 l3_cksum;
- u32 l4_cksum_valid;
- u32 l4_cksum;
- u32 frame;
- u32 proto;
- u32 is_vlan;
- u32 vlan;
- u32 rth_bucket;
- u32 rth_it_hit;
- u32 rth_spdm_hit;
- u32 rth_hash_type;
- u32 rth_value;
-};
-
-/**
- * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
- * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
- * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
- * presentation configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error - unparseable packet,
- * such as unknown IPv6 header.
- * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error - frame integrity
- * error, such as FCS or ECC.
- * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error - the RxD
- * buffer(s) were not appropriately sized and data loss occurred.
- * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error - RxD corrupted.
- * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow - the contents of
- * Segment1 exceeded the capacity of Buffer1 and the remainder
- * was placed in Buffer2. Segment2 now starts in Buffer3.
- * No data loss or errors occurred.
- * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 - one of the RxDs
- * assigned buffers has a size of 0 bytes.
- * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped - either due to
- * VPath Reset or because of a VPIN mismatch.
- * @VXGE_HW_RING_T_CODE_UNUSED: Unused
- * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors - more than one
- * transfer code condition occurred.
- *
- * Transfer codes returned by adapter.
- */
-enum vxge_hw_ring_tcode {
- VXGE_HW_RING_T_CODE_OK = 0x0,
- VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
- VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
- VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
- VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
- VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
- VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
- VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
- VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
- VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
- VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
- VXGE_HW_RING_T_CODE_UNUSED = 0xE,
- VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
-};
-
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh);
-
-void
-vxge_hw_ring_rxd_pre_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post_wmb(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void vxge_hw_ring_rxd_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh,
- u8 *t_code);
-
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u8 t_code);
-
-void vxge_hw_ring_rxd_free(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
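The ring API above follows a reserve/post/complete cycle. A hedged sketch of the completion side, assuming ringh is a live ring handle; a real consumer would also refill and re-post buffers:

	void *rxdh;
	u8 t_code;

	/* Drain completed descriptors: let the HW layer account for any
	 * non-OK transfer code, then return each descriptor to the free
	 * pool so it can be reserved and posted again later.
	 */
	while (vxge_hw_ring_rxd_next_completed(ringh, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_RING_T_CODE_OK)
			vxge_hw_ring_handle_tcode(ringh, rxdh, t_code);
		vxge_hw_ring_rxd_free(ringh, rxdh);
	}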
-
-/**
- * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
- * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
- * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
- * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
- * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
- * @VXGE_HW_FRAME_PROTO_TCP: TCP.
- * @VXGE_HW_FRAME_PROTO_UDP: UDP.
- * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
- *
- * Higher layer ethernet protocols and options.
- */
-enum vxge_hw_frame_proto {
- VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
- VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
- VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
- VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
- VXGE_HW_FRAME_PROTO_TCP = 0x02,
- VXGE_HW_FRAME_PROTO_UDP = 0x01,
- VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
- VXGE_HW_FRAME_PROTO_UDP)
-};
-
-/**
- * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
- *
- * These gather codes are used to indicate the position of a TxD in a TxD list
- */
-enum vxge_hw_fifo_gather_code {
- VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
- VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
- VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
- VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
-};
-
-/**
- * enum vxge_hw_fifo_tcode - tcodes used in fifo
- * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
- * frame data) returned with corrupt data.
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
- * with no data.
- * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
- * frame or LSO MSS that was too long (>9800B).
- * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
- * Offload operation, due to improper header template,
- * unsupported protocol, etc.
- * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
- * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
- * data buffer transfer errors are encountered (see below).
- * Otherwise it is set to 0.
- *
- * These tcodes are returned by various APIs that report TxD status
- */
-enum vxge_hw_fifo_tcode {
- VXGE_HW_FIFO_T_CODE_OK = 0x0,
- VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
- VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
- VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
- VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
- VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
- VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
-};
-
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- void **txdl_priv);
-
-void vxge_hw_fifo_txdl_buffer_set(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- u32 frag_idx,
- dma_addr_t dma_pointer,
- u32 size);
-
-void vxge_hw_fifo_txdl_post(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh);
-
-u32 vxge_hw_fifo_free_txdl_count_get(
- struct __vxge_hw_fifo *fifo_handle);
-
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- enum vxge_hw_fifo_tcode *t_code);
-
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code);
-
-void vxge_hw_fifo_txdl_free(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh);
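The fifo side mirrors the ring API: a TxDL is reserved, its fragments are attached, and the list is posted; completions are then reaped with the matching *_next_completed/_free pair. A rough sketch of the transmit half, assuming a single-fragment skb whose data has already been DMA-mapped to dma_addr:

	void *txdlh, *txdl_priv;

	if (vxge_hw_fifo_txdl_reserve(fifoh, &txdlh, &txdl_priv) !=
	    VXGE_HW_OK)
		return NETDEV_TX_BUSY;

	/* Attach fragment 0, then hand the descriptor list to the NIC */
	vxge_hw_fifo_txdl_buffer_set(fifoh, txdlh, 0, dma_addr, skb->len);
	vxge_hw_fifo_txdl_post(fifoh, txdlh);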
-
-/*
- * Device
- */
-
-#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
-#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-
-/*
- * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
- * bytes. Each memblock is contiguous DMA-able memory. Each
- * memblock contains 1 or more 4KB RxD blocks visible to the
- * Titan hardware.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: 1 if the descriptor is reserved, 0 otherwise. Internal usage.
- *
- * Per-receive descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that the driver can ask HW
- * to allocate additional per-descriptor space for its own (driver-specific)
- * purposes.
- */
-struct __vxge_hw_ring_rxd_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
-#ifdef VXGE_DEBUG_ASSERT
- struct vxge_hw_mempool_dma *dma_object;
-#endif
-};
-
-struct vxge_hw_mempool_cbs {
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-};
-
-#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
- ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 *data1,
- u64 *data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 data1,
- u64 data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_enable(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-void vxge_hw_device_intr_enable(
- struct __vxge_hw_device *devh);
-
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
-
-void vxge_hw_device_intr_disable(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_mask_all(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_unmask_all(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_begin_irq(
- struct __vxge_hw_device *devh,
- u32 skip_alarms,
- u64 *reason);
-
-void vxge_hw_device_clear_tx_rx(
- struct __vxge_hw_device *devh);
-
-/*
- * Virtual Paths
- */
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
-
-u32 vxge_hw_vpath_id(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_vpath_mac_addr_add_mode {
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
- VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
- VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
-};
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_poll_rx(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_poll_tx(
- struct __vxge_hw_fifo *fifoh,
- struct sk_buff ***skb_ptr, int nr_skb, int *more);
-
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 skip_alarms);
-
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
- int *tim_msix_id, int alarm_msix_id);
-
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
-
-void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
-
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-enum vxge_hw_status vxge_hw_vpath_intr_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_mask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_unmask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
- void **dtrh);
-
-void
-vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
-
-void
-vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
-
-int
-vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h
deleted file mode 100644
index b9efa28bab3e..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-version.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_VERSION_H
-#define VXGE_VERSION_H
-
-#define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "3"
-#define VXGE_VERSION_BUILD "22640"
-#define VXGE_VERSION_FOR "k"
-
-#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
-
-#define VXGE_DEAD_FW_VER_MAJOR 1
-#define VXGE_DEAD_FW_VER_MINOR 4
-#define VXGE_DEAD_FW_VER_BUILD 4
-
-#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
- VXGE_DEAD_FW_VER_MINOR, \
- VXGE_DEAD_FW_VER_BUILD)
-
-#define VXGE_EPROM_FW_VER_MAJOR 1
-#define VXGE_EPROM_FW_VER_MINOR 6
-#define VXGE_EPROM_FW_VER_BUILD 1
-
-#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
- VXGE_EPROM_FW_VER_MINOR, \
- VXGE_EPROM_FW_VER_BUILD)
-
-#define VXGE_CERT_FW_VER_MAJOR 1
-#define VXGE_CERT_FW_VER_MINOR 8
-#define VXGE_CERT_FW_VER_BUILD 1
-
-#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
- VXGE_CERT_FW_VER_MINOR, \
- VXGE_CERT_FW_VER_BUILD)
-
-#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index b456e81a73a4..7453cc5ba832 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -149,7 +149,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
}
/* Pre_lag action must be first on action list.
- * If other actions already exist they need pushed forward.
+ * If other actions already exist they need to be pushed forward.
*/
if (act_len)
memmove(nfp_flow->action_data + act_size,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 7c31a46195b2..b3b2a23b8d89 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -182,7 +182,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
u8 ip_proto = 0;
/* Temporary buffer for mangling keys, 64 is enough to cover max
* struct size of key in various fields that may be mangled.
- * Supported fileds to mangle:
+ * Supported fields to mangle:
* mac_src/mac_dst(struct flow_match_eth_addrs, 12B)
* nw_tos/nw_ttl(struct flow_match_ip, 2B)
* nw_src/nw_dst(struct flow_match_ipv4/6_addrs, 32B)
@@ -194,7 +194,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
entry1->netdev != entry2->netdev)
return -EINVAL;
- /* check the overlapped fields one by one, the unmasked part
+ /* Check the overlapped fields one by one, the unmasked part
* should not conflict with each other.
*/
if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -563,7 +563,7 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
- /* ip_proto is the only field that needed in later compile_action,
+ /* ip_proto is the only field that is needed in later compile_action,
* needed to set the correct checksum flags. It doesn't really matter
* which input rule's ip_proto field we take as the earlier merge checks
* would have made sure that they don't conflict. We do not know which
@@ -1013,7 +1013,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
nft_m_entry->tc_m_parent = tc_m_entry;
nft_m_entry->nft_parent = nft_entry;
nft_m_entry->tc_flower_cookie = 0;
- /* Copy the netdev from one the pre_ct entry. When the tc_m_entry was created
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was created
* it only combined them if the netdevs were the same, so can use any of them.
*/
nft_m_entry->netdev = pre_ct_entry->netdev;
@@ -1143,7 +1143,7 @@ nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
zt->priv = priv;
zt->nft = NULL;
- /* init the various hash tables and lists*/
+ /* init the various hash tables and lists */
INIT_LIST_HEAD(&zt->pre_ct_list);
INIT_LIST_HEAD(&zt->post_ct_list);
INIT_LIST_HEAD(&zt->nft_flows_list);
@@ -1346,7 +1346,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
*/
if (is_nft_flow) {
- /* Need to iterate through list of nft_flow entries*/
+ /* Need to iterate through list of nft_flow entries */
struct nfp_fl_ct_flow_entry *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
@@ -1354,7 +1354,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
cleanup_nft_merge_entry(m_entry);
}
} else {
- /* Need to iterate through list of tc_merged_flow entries*/
+ /* Need to iterate through list of tc_merged_flow entries */
struct nfp_fl_ct_tc_merge *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index ede90e086b28..e92860e20a24 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -234,7 +234,7 @@ nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
}
/* To signal the end of a batch, both the switch and last flags are set
- * and the the reserved SYNC group ID is used.
+ * and the reserved SYNC group ID is used.
*/
if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
@@ -576,7 +576,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
group->dirty = true;
group->slave_cnt = slave_count;
- /* Group may have been on queue for removal but is now offloable. */
+ /* Group may have been on queue for removal but is now offloadable. */
group->to_remove = false;
mutex_unlock(&lag->lock);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 74e1b279c13b..0f06ef6e24bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -339,7 +339,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
goto err_free_ctx_entry;
}
- /* Do net allocate a mask-id for pre_tun_rules. These flows are used to
+ /* Do not allocate a mask-id for pre_tun_rules. These flows are used to
* configure the pre_tun table and are never actually send to the
* firmware as an add-flow message. This causes the mask-id allocation
* on the firmware to get out of sync if allocated here.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 9d65459bdba5..83c97154c0c7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -359,7 +359,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_opts(rule, &enc_op);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- /* check if GRE, which has no enc_ports */
+ /* Check if GRE, which has no enc_ports */
if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
@@ -1016,7 +1016,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
nfp_flower_is_merge_flow(sub_flow2))
return -EINVAL;
- /* check if the two flows are already merged */
+ /* Check if the two flows are already merged */
parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
if (rhashtable_lookup_fast(&priv->merge_table,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 3206ba83b1aa..4e5df9f2c372 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -534,7 +534,7 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
}
}
-/* offload tc action, currently only for tc police */
+/* Offload tc action, currently only for tc police */
static const struct rhashtable_params stats_meter_table_params = {
.key_offset = offsetof(struct nfp_meter_entry, meter_id),
@@ -690,7 +690,7 @@ nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
for (i = 0 ; i < action_num; i++) {
- /*set qos associate data for this interface */
+ /* Set qos associate data for this interface */
action = paction + i;
if (action->id != FLOW_ACTION_POLICE) {
NL_SET_ERR_MSG_MOD(extack,
@@ -736,7 +736,7 @@ nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
u32 meter_id;
bool pps;
- /*delete qos associate data for this interface */
+ /* Delete qos associate data for this interface */
if (fl_act->id != FLOW_ACTION_POLICE) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: qos rate limit offload requires police action");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 6bf3ec448e7e..0af5541c6eaf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1064,7 +1064,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
entry->ref_count--;
- /* If del is part of a mod then mac_list is still in use elsewheree. */
+ /* If del is part of a mod then mac_list is still in use elsewhere. */
if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index f9410d59146d..448c1c1afaee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -3,6 +3,7 @@
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -81,12 +82,11 @@ nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
@@ -167,30 +167,35 @@ nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync);
}
-static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
+static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
- if (likely(!md_dst && !tls_handle))
- return 0;
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
- if (!tls_handle)
- return 0;
- md_dst = NULL;
+ if (unlikely(md_dst || tls_handle)) {
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
}
- md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+ vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
+
+ if (!(md_dst || tls_handle || vlan_insert))
+ return 0;
+
+ md_bytes = sizeof(meta_id) +
+ !!md_dst * NFP_NET_META_PORTID_SIZE +
+ !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
+ vlan_insert * NFP_NET_META_VLAN_SIZE;
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- meta_id = 0;
data = skb_push(skb, md_bytes) + md_bytes;
if (md_dst) {
- data -= 4;
+ data -= NFP_NET_META_PORTID_SIZE;
put_unaligned_be32(md_dst->u.port_info.port_id, data);
meta_id = NFP_NET_META_PORTID;
}
@@ -198,13 +203,23 @@ static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
/* conn handle is opaque, we just use u64 to be able to quickly
* compare it to zero
*/
- data -= 8;
+ data -= NFP_NET_META_CONN_HANDLE_SIZE;
memcpy(data, &tls_handle, sizeof(tls_handle));
meta_id <<= NFP_NET_META_FIELD_SIZE;
meta_id |= NFP_NET_META_CONN_HANDLE;
}
+ if (vlan_insert) {
+ data -= NFP_NET_META_VLAN_SIZE;
+ /* skb->vlan_proto is already __be16, so it can be copied into
+ * the metadata as-is, without calling put_unaligned_be16()
+ */
+ memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
+ put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_VLAN;
+ }
- data -= 4;
+ data -= sizeof(meta_id);
put_unaligned_be32(meta_id, data);
return md_bytes;
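The metadata prepend built here is self-describing: a single 32-bit meta_id word leads the area, holding one 4-bit type tag (NFP_NET_META_FIELD_SIZE) per field, while the field payloads are laid out below it in reverse push order because data grows downward toward the packet. A simplified model of the packing for the port-ID plus TLS-handle case:

	u32 meta_id = NFP_NET_META_PORTID;	/* 0x5: port ID pushed first */

	meta_id <<= NFP_NET_META_FIELD_SIZE;
	meta_id |= NFP_NET_META_CONN_HANDLE;	/* 0x57: handle pushed second */

	/* The type OR-ed in last sits in the low nibble and describes the
	 * field that immediately follows the meta_id word, matching how a
	 * parser walks the tags low-nibble-first (see nfp_nfd3_parse_meta()).
	 */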
@@ -258,7 +273,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- md_bytes = nfp_nfd3_prep_tx_meta(skb, tls_handle);
+ md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle);
if (unlikely(md_bytes < 0))
goto err_flush;
@@ -704,7 +719,7 @@ bool
nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
- u32 meta_info;
+ u32 meta_info, vlan_info;
meta_info = get_unaligned_be32(data);
data += 4;
@@ -722,6 +737,17 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
case NFP_NET_META_PORTID:
meta->portid = get_unaligned_be32(data);
data += 4;
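For reference, the NFP_NET_META_VLAN field decoded in the case above is one 32-bit word; the masks are added to nfp_net_ctrl.h later in this patch, and the bits they do not cover are treated as reserved here (an assumption: the driver simply never reads them).

	/*  31      30..20   19..16   15..0
	 * +-------+--------+--------+------+
	 * | STRIP |  rsvd  |  TPID  | TCI  |
	 * +-------+--------+--------+------+
	 * TPID selects NFP_NET_VLAN_CTAG (802.1Q) or NFP_NET_VLAN_STAG
	 * (802.1ad); TCI is the tag control information to restore.
	 */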
@@ -1050,9 +1076,11 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
#endif
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
index f31eabdc0631..a03190c9313c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
@@ -247,10 +247,13 @@ nfp_nfd3_print_tx_descs(struct seq_file *file,
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2 | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS | \
NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VEPA | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 454fea4c8be2..65e243168765 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -94,9 +94,12 @@ static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, meta))) {
+ dev_kfree_skb_any(skb);
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+
if (meta_xdp)
skb_metadata_set(skb,
xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 300637e576a8..0b4f550aa39d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -46,28 +46,16 @@ nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
segs = skb_shinfo(skb)->gso_segs;
mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
- /* Note: TSO of the packet with metadata prepended to skb is not
- * supported yet, in which case l3/l4_offset and lso_hdrlen need
- * be correctly handled here.
- * Concern:
- * The driver doesn't have md_bytes easily available at this point.
- * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
- * so it needs the full length there. The app MEs might prefer
- * l3_offset and l4_offset relative to the start of packet data,
- * but could probably cope with it being relative to the CTM buf
- * data offset.
- */
txd.l3_offset = l3_offset;
txd.l4_offset = l4_offset;
txd.lso_meta_res = 0;
@@ -191,12 +179,6 @@ static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
return 0;
- /* Note: Unsupported case when TSO a skb with metedata prepended.
- * See the comments in `nfp_nfdk_tx_tso` for details.
- */
- if (unlikely(md_dst && skb_is_gso(skb)))
- return -EOPNOTSUPP;
-
if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))
return -ENOMEM;
@@ -717,7 +699,7 @@ static bool
nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
- u32 meta_info;
+ u32 meta_info, vlan_info;
meta_info = get_unaligned_be32(data);
data += 4;
@@ -735,6 +717,17 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
case NFP_NET_META_PORTID:
meta->portid = get_unaligned_be32(data);
data += 4;
@@ -1170,9 +1163,11 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
index f4d94ae0a349..6cd895d3b571 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
@@ -168,10 +168,11 @@ nfp_nfdk_print_tx_descs(struct seq_file *file,
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD | \
- NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_TXRWB | NFP_NET_CFG_CTRL_VEPA | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4f88d17536c3..36b173039024 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -410,7 +410,9 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
return NULL;
}
- fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "nffw.partno");
+ if (!fw_model)
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
if (!fw_model) {
dev_err(&pdev->dev, "Error: can't read part number\n");
return NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index b07cea8e354c..a101ff30a1ae 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -248,6 +248,8 @@ struct nfp_net_rx_desc {
};
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+#define NFP_NET_VLAN_CTAG 0
+#define NFP_NET_VLAN_STAG 1
struct nfp_meta_parsed {
u8 hash_type;
@@ -256,6 +258,11 @@ struct nfp_meta_parsed {
u32 mark;
u32 portid;
__wsum csum;
+ struct {
+ bool stripped;
+ u8 tpid;
+ u16 tci;
+ } vlan;
};
struct nfp_net_rx_hash {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 57f284eefeb3..c5c3a4aac788 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -31,6 +31,7 @@
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>
@@ -597,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
return skb;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
resync_pending = tls_offload_tx_resync_pending(skb->sk);
@@ -665,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
return;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
@@ -1694,16 +1695,18 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
- new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
if (features & NETIF_F_HW_VLAN_CTAG_TX)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
@@ -1713,6 +1716,13 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (changed & NETIF_F_HW_VLAN_STAG_RX) {
+ if (features & NETIF_F_HW_VLAN_STAG_RX)
+ new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ }
+
if (changed & NETIF_F_SG) {
if (features & NETIF_F_SG)
new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
@@ -1742,6 +1752,27 @@ static int nfp_net_set_features(struct net_device *netdev,
}
static netdev_features_t
+nfp_net_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (features & NETIF_F_HW_VLAN_STAG_RX)) {
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
+ } else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
+ }
+ }
+ return features;
+}
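In effect, when user space requests both stripping modes at once (for example, toggling one feature with ethtool -K while the other is already active), ndo_fix_features resolves the conflict in favour of the newly requested mode, clears the older one from wanted_features, and logs which mode survived instead of failing the request.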
+
+static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
@@ -1757,8 +1788,7 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb_is_gso(skb)) {
u32 hdrlen;
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
/* Assume worst case scenario of having longest possible
* metadata prepend - 8B
@@ -1892,6 +1922,69 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
+ nlflags, filter_mask, NULL);
+}
+
+static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem, err;
+ u32 new_ctrl;
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
+ new_ctrl = nn->dp.ctrl;
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA)
+ new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
+ else if (mode == BRIDGE_MODE_VEB)
+ new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
+ else
+ return -EOPNOTSUPP;
+
+ if (new_ctrl == nn->dp.ctrl)
+ return 0;
+
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (!err)
+ nn->dp.ctrl = new_ctrl;
+
+ return err;
+ }
+
+ return -EINVAL;
+}
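With both ndos wired up, the mode is driven from user space through the standard iproute2 path: bridge link show reports the current hwmode, and (assuming VEPA-capable firmware) bridge link set dev <netdev> hwmode vepa lands in nfp_net_bridge_setlink() and flips NFP_NET_CFG_CTRL_VEPA through the usual general-update reconfig.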
+
const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
@@ -1914,11 +2007,14 @@ const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_xsk_wakeup = nfp_net_xsk_wakeup,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
};
const struct net_device_ops nfp_nfdk_netdev_ops = {
@@ -1932,6 +2028,7 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
+ .ndo_set_vf_rate = nfp_app_set_vf_rate,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
@@ -1942,10 +2039,13 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
};
static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
@@ -1993,7 +2093,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2002,6 +2102,9 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXQINQ ? "RXQINQ " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 " : "",
nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
@@ -2012,6 +2115,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_TXRWB ? "TXRWB " : "",
+ nn->cap & NFP_NET_CFG_CTRL_VEPA ? "VEPA " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
@@ -2288,31 +2392,39 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->vlan_features = netdev->hw_features;
- if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
}
- if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
} else {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
}
}
if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ }
netdev->features = netdev->hw_features;
if (nfp_app_has_tc(nn->app) && nn->port)
netdev->hw_features |= NETIF_F_HW_TC;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
/* Finalise the netdev setup */
switch (nn->dp.ops->version) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 8892a94f00c3..ac05ec34d69e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -31,10 +31,16 @@
#define NFP_NET_LSO_MAX_HDR_SZ 255
#define NFP_NET_LSO_MAX_SEGS 64
+/* Working with the metadata VLAN API (NFD version >= 2.0) */
+#define NFP_NET_META_VLAN_STRIP BIT(31)
+#define NFP_NET_META_VLAN_TPID_MASK GENMASK(19, 16)
+#define NFP_NET_META_VLAN_TCI_MASK GENMASK(15, 0)
+
/* Prepend field types */
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
+#define NFP_NET_META_VLAN 4 /* ctag or stag type */
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
@@ -42,6 +48,10 @@
#define NFP_META_PORT_ID_CTRL ~0U
+/* Prepend field sizes */
+#define NFP_NET_META_VLAN_SIZE 4
+#define NFP_NET_META_PORTID_SIZE 4
+#define NFP_NET_META_CONN_HANDLE_SIZE 8
/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
@@ -89,11 +99,15 @@
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */
+#define NFP_NET_CFG_CTRL_RXQINQ (0x1 << 13) /* Enable S-tag strip */
+#define NFP_NET_CFG_CTRL_RXVLAN_V2 (0x1 << 15) /* Enable C-tag strip */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
+#define NFP_NET_CFG_CTRL_VEPA (0x1 << 22) /* Enable VEPA mode */
+#define NFP_NET_CFG_CTRL_TXVLAN_V2 (0x1 << 23) /* Enable VLAN C-tag insert */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -110,6 +124,10 @@
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+#define NFP_NET_CFG_CTRL_RXVLAN_ANY (NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2)
+#define NFP_NET_CFG_CTRL_TXVLAN_ANY (NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2)
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
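The *_ANY masks let common paths test or clear a capability without caring which generation of the firmware interface provides it; the set_features and netdev-init changes earlier in this patch rely on exactly that.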
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
index 34dd94811df3..550df83b798c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
@@ -440,3 +440,27 @@ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
return ret;
}
+
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta)
+{
+ u16 tpid = 0, tci = 0;
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
+ tpid = ETH_P_8021Q;
+ tci = le16_to_cpu(rxd->rxd.vlan);
+ } else if (meta->vlan.stripped) {
+ if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
+ tpid = ETH_P_8021Q;
+ else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
+ tpid = ETH_P_8021AD;
+ else
+ return false;
+
+ tci = meta->vlan.tci;
+ }
+ if (tpid)
+ __vlan_hwaccel_put_tag(skb, htons(tpid), tci);
+
+ return true;
+}
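Returning false here (a stripped tag whose TPID selector is neither CTAG nor STAG) makes both datapaths drop the frame: the firmware has already removed the tag, so the driver cannot reconstruct the original header, and delivering the packet untagged would be silently wrong.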
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
index 83becb338478..831c83ce0d3d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
@@ -106,6 +106,8 @@ int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta);
enum nfp_nfd_version {
NFP_NFD_VER_NFD3,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 15e9cf71a8e2..c922dfab8080 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -29,6 +29,7 @@
#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
+#include "nfpcore/nfp_cpp.h"
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
@@ -442,6 +443,160 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
+static int nfp_test_link(struct net_device *netdev)
+{
+ if (!netif_carrier_ok(netdev) || !(netdev->flags & IFF_UP))
+ return 1;
+
+ return 0;
+}
+
+static int nfp_test_nsp(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_nsp_identify *nspi;
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_nsp_open(app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_info(netdev, "NSP Test: failed to access the NSP: %d\n", err);
+ goto exit;
+ }
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15) {
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
+ if (!nspi) {
+ err = -ENOMEM;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_identify(nsp, nspi, sizeof(*nspi));
+ if (err < 0)
+ netdev_info(netdev, "NSP Test: reading bsp version failed %d\n", err);
+
+ kfree(nspi);
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+exit:
+ return err;
+}
+
+static int nfp_test_fw(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ netdev_info(netdev, "FW Test: update failed %d\n", err);
+
+ return err;
+}
+
+static int nfp_test_reg(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_cpp *cpp = app->cpp;
+ u32 model = nfp_cpp_model(cpp);
+ u32 value;
+ int err;
+
+ err = nfp_cpp_model_autodetect(cpp, &value);
+ if (err < 0) {
+ netdev_info(netdev, "REG Test: NFP model detection failed %d\n", err);
+ return err;
+ }
+
+ return (value == model) ? 0 : 1;
+}
+
+static bool link_test_supported(struct net_device *netdev)
+{
+ return true;
+}
+
+static bool nsp_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static bool fw_test_supported(struct net_device *netdev)
+{
+ if (nfp_netdev_is_nfp_net(netdev))
+ return true;
+
+ return false;
+}
+
+static bool reg_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static struct nfp_self_test_item {
+ char name[ETH_GSTRING_LEN];
+ bool (*is_supported)(struct net_device *dev);
+ int (*func)(struct net_device *dev);
+} nfp_self_test[] = {
+ {"Link Test", link_test_supported, nfp_test_link},
+ {"NSP Test", nsp_test_supported, nfp_test_nsp},
+ {"Firmware Test", fw_test_supported, nfp_test_fw},
+ {"Register Test", reg_test_supported, nfp_test_reg}
+};
+
+#define NFP_TEST_TOTAL_NUM ARRAY_SIZE(nfp_self_test)
+
+static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ ethtool_sprintf(&data, nfp_self_test[i].name);
+}
+
+static int nfp_get_self_test_count(struct net_device *netdev)
+{
+ int i, count = 0;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ count++;
+
+ return count;
+}
+
+static void nfp_net_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ int i, ret, count = 0;
+
+ netdev_info(netdev, "Start self test\n");
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++) {
+ if (nfp_self_test[i].is_supported(netdev)) {
+ ret = nfp_self_test[i].func(netdev);
+ if (ret)
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[count++] = ret;
+ }
+ }
+
+ netdev_info(netdev, "Test end\n");
+}
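These entries surface through the standard ethtool self-test interface: ethtool -t <netdev> runs each supported test, the per-test results land in the data array in order, and any non-zero result marks the whole run with ETH_TEST_FL_FAILED.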
+
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -705,6 +860,9 @@ static void nfp_net_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -739,6 +897,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
cnt += nfp_mac_get_stats_count(netdev);
cnt += nfp_app_port_get_stats_count(nn->port);
return cnt;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -757,6 +917,9 @@ static void nfp_port_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -786,6 +949,8 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
return count;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -1477,6 +1642,38 @@ static void nfp_port_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
}
+static int nfp_net_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Control LED to blink */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 1);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Control LED to normal mode */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
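This corresponds to ethtool -p <netdev> (port identification): the ACTIVE/INACTIVE transitions delegate the blink pattern to the service processor via nfp_eth_set_idmode(), and the explicit ON/OFF states are refused because the NSP drives the LED itself.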
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -1485,6 +1682,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
+ .self_test = nfp_net_self_test,
.get_strings = nfp_net_get_strings,
.get_ethtool_stats = nfp_net_get_stats,
.get_sset_count = nfp_net_get_sset_count,
@@ -1510,6 +1708,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
.get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -1517,6 +1716,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
+ .self_test = nfp_net_self_test,
.get_sset_count = nfp_port_get_sset_count,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
@@ -1528,6 +1728,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
.get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 75b5018f2e1b..8b77582bdfa0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -365,9 +365,9 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->vlan_features = netdev->hw_features;
- if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
+ if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN_ANY)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
else
@@ -375,11 +375,15 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
}
if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (repr_cap & NFP_NET_CFG_CTRL_RXQINQ)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
netdev->features = netdev->hw_features;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
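
/* Sketch of the _ANY capability masks the checks above rely on: either the
 * original VLAN-offload capability bit or its v2 successor enables the
 * feature.  Assumed to match the nfp_net_ctrl.h definitions:
 */
#define NFP_NET_CFG_CTRL_RXVLAN_ANY \
	(NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)
#define NFP_NET_CFG_CTRL_TXVLAN_ANY \
	(NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)
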
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index f5360bae6f75..77d66855be42 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -196,6 +196,8 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);
+
static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
return !!eth_port->fec_modes_supported;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 311a5be25acb..edd300033735 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -49,6 +49,7 @@
#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
+#define NSP_ETH_CTRL_SET_IDMODE BIT_ULL(8)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -492,6 +493,35 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
return 0;
}
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ /* The set-idmode operation was added in NSP ABI 0.32 */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set id mode operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, state);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
({ \
__BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index f54035455ad6..c03986bf2628 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -947,10 +947,9 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
}
if (encap)
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
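
/* This and the following conversions fold the recurring open-coded header
 * arithmetic into two helpers; a sketch of their assumed definitions
 * (added to include/linux/tcp.h by the same series):
 */
static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	/* bytes from skb->data up to and including the TCP header */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	/* same, measured against the inner headers of an encapsulated frame */
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
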
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 07dd3c3b1771..4e6f00af17d9 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1877,7 +1877,7 @@ netxen_tso_check(struct net_device *netdev,
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 82e74f62b677..d701ecd3ba00 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1110,7 +1110,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
bit_len);
/* Some bits represent more than a
- * a single interrupt. Correctly print
+ * single interrupt. Correctly print
* their name.
*/
if (ATTENTION_LENGTH(flags) > 2 ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 69b0ede75cae..5a5dbbb8d8aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -42,8 +42,7 @@ int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
bmap->max_count = max_count;
- bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
- GFP_KERNEL);
+ bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL);
if (!bmap->bitmap)
return -ENOMEM;
@@ -107,7 +106,7 @@ int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
- return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+ return bitmap_empty(bmap->bitmap, bmap->max_count);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
@@ -343,7 +342,7 @@ void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
}
end:
- kfree(bmap->bitmap);
+ bitmap_free(bmap->bitmap);
bmap->bitmap = NULL;
}
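
/* bitmap_zalloc()/bitmap_free() replace the open-coded pair above; the
 * equivalence being relied on (a sketch, per the kernel bitmap API):
 *
 *   bitmap_zalloc(nbits, flags)
 *       == kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags)
 *   bitmap_free(bitmap) == kfree(bitmap)
 *
 * Allocations must be freed with the matching API, hence the paired
 * change in qed_rdma_bmap_free().
 */
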
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index b7cc36589f59..7c2af482192d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -260,11 +260,9 @@ static int map_frag_to_bd(struct qede_tx_queue *txq,
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
if (is_encap_pkt)
- return (skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data);
- else
- return (skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 8d43ca282956..9da5e97f8a0a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -497,7 +497,7 @@ set_flags:
}
opcode = QLCNIC_TX_ETHER_PKT;
if (skb_is_gso(skb)) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->hdr_length = hdr_len;
opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 80c95c331c82..0d80447d4d3b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1264,7 +1264,7 @@ static int emac_tso_csum(struct emac_adapter *adpt,
pskb_trim(skb, pkt_len);
}
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* we only need to do csum */
netif_warn(adpt, tx_err, adpt->netdev,
@@ -1339,7 +1339,7 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
/* if Large Segment Offload is set in the TPD (TCP Segmentation Offload) */
if (TPD_LSO(tpd)) {
- mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mapped_len = skb_tcp_all_headers(skb);
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = mapped_len;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3098d6672192..1b7fdb4f056b 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4190,7 +4190,6 @@ static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
- u32 transport_offset = (u32)skb_transport_offset(skb);
struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 mss = shinfo->gso_size;
@@ -4207,7 +4206,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
WARN_ON_ONCE(1);
}
- opts[0] |= transport_offset << GTTCPHO_SHIFT;
+ opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT;
opts[1] |= mss << TD1_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
@@ -4235,7 +4234,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
else
WARN_ON_ONCE(1);
- opts[1] |= transport_offset << TCPHO_SHIFT;
+ opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT;
} else {
unsigned int padto = rtl_quirk_packet_padto(tp, skb);
@@ -4402,14 +4401,13 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- int transport_offset = skb_transport_offset(skb);
struct rtl8169_private *tp = netdev_priv(dev);
if (skb_is_gso(skb)) {
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
features = rtl8168evl_fix_tso(skb, features);
- if (transport_offset > GTTCPHO_MAX &&
+ if (skb_transport_offset(skb) > GTTCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -4420,7 +4418,7 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
if (rtl_quirk_packet_padto(tp, skb))
features &= ~NETIF_F_CSUM_MASK;
- if (transport_offset > TCPHO_MAX &&
+ if (skb_transport_offset(skb) > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 407a1f8e3059..a1c10b61269b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -89,7 +89,7 @@ static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
- /* Exit and disable EEE in case of we are are in LPI state. */
+ /* Exit and disable EEE in case we are in the LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 186cb28c03bd..a99c3a6b912c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3874,7 +3874,7 @@ static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int efx_tunnel_type, rc;
@@ -3934,7 +3934,7 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int rc;
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index 173f0ecebc70..425017fbcb25 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -423,65 +423,58 @@ static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
*/
static void ef100_pci_remove(struct pci_dev *pci_dev)
{
- struct efx_nic *efx;
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ struct efx_probe_data *probe_data;
- efx = pci_get_drvdata(pci_dev);
if (!efx)
return;
- rtnl_lock();
- dev_close(efx->net_dev);
- rtnl_unlock();
-
- /* Unregistering our netdev notifier triggers unbinding of TC indirect
- * blocks, so we have to do it before PCI removal.
- */
- unregister_netdevice_notifier(&efx->netdev_notifier);
-#if defined(CONFIG_SFC_SRIOV)
- if (!efx->type->is_vf)
- efx_ef100_pci_sriov_disable(efx);
-#endif
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ ef100_remove_netdev(probe_data);
+
ef100_remove(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
- pci_set_drvdata(pci_dev, NULL);
- efx_fini_struct(efx);
- free_netdev(efx->net_dev);
+ pci_dbg(pci_dev, "shutdown successful\n");
pci_disable_pcie_error_reporting(pci_dev);
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ kfree(probe_data);
};
static int ef100_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
struct ef100_func_ctl_window fcw = { 0 };
- struct net_device *net_dev;
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
- if (!net_dev)
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
efx->type = (const struct efx_nic_type *)entry->driver_data;
+ efx->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, efx);
- SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail;
efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
- netif_info(efx, probe, efx->net_dev,
- "Solarflare EF100 NIC detected\n");
+ pci_info(pci_dev, "Solarflare EF100 NIC detected\n");
rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Error looking for ef100 function control window, rc=%d\n",
- rc);
+ pci_err(pci_dev,
+ "Error looking for ef100 function control window, rc=%d\n",
+ rc);
goto fail;
}
@@ -493,8 +486,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
}
if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
- netif_err(efx, probe, efx->net_dev,
- "Func control window overruns BAR\n");
+ pci_err(pci_dev, "Func control window overruns BAR\n");
rc = -EIO;
goto fail;
}
@@ -508,19 +500,16 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
efx->reg_base = fcw.offset;
- efx->netdev_notifier.notifier_call = ef100_netdev_event;
- rc = register_netdevice_notifier(&efx->netdev_notifier);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Failed to register netdevice notifier, rc=%d\n", rc);
+ rc = efx->type->probe(efx);
+ if (rc)
goto fail;
- }
- rc = efx->type->probe(efx);
+ efx->state = STATE_PROBED;
+ rc = ef100_probe_netdev(probe_data);
if (rc)
goto fail;
- netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+ pci_dbg(pci_dev, "initialisation successful\n");
return 0;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 5dba4125d953..702abbe59b76 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -26,7 +26,7 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 67fe44db6b61..060392ddd612 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -22,6 +22,7 @@
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
+#include "ef100_sriov.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -79,7 +80,7 @@ static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
*/
static int ef100_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
@@ -96,13 +97,15 @@ static int ef100_net_stop(struct net_device *net_dev)
efx_mcdi_free_vis(efx);
efx_remove_interrupts(efx);
+ efx->state = STATE_NET_DOWN;
+
return 0;
}
/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int allocated_vis;
int rc;
@@ -172,6 +175,8 @@ static int ef100_net_open(struct net_device *net_dev)
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
+ efx->state = STATE_NET_UP;
+
return 0;
fail:
@@ -189,7 +194,7 @@ fail:
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
int rc;
@@ -239,13 +244,14 @@ int ef100_netdev_event(struct notifier_block *this,
struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME)
+ if (efx->net_dev == net_dev &&
+ (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
ef100_update_name(efx);
return NOTIFY_DONE;
}
-int ef100_register_netdev(struct efx_nic *efx)
+static int ef100_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
int rc;
@@ -271,7 +277,7 @@ int ef100_register_netdev(struct efx_nic *efx)
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
- efx->state = STATE_READY;
+ efx->state = STATE_NET_DOWN;
rtnl_unlock();
efx_init_mcdi_logging(efx);
@@ -283,11 +289,119 @@ fail_locked:
return rc;
}
-void ef100_unregister_netdev(struct efx_nic *efx)
+static void ef100_unregister_netdev(struct efx_nic *efx)
{
if (efx_dev_registered(efx)) {
efx_fini_mcdi_logging(efx);
- efx->state = STATE_UNINIT;
+ efx->state = STATE_PROBED;
unregister_netdev(efx->net_dev);
}
}
+
+void ef100_remove_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+
+ if (!efx->net_dev)
+ return;
+
+ rtnl_lock();
+ dev_close(efx->net_dev);
+ rtnl_unlock();
+
+ unregister_netdevice_notifier(&efx->netdev_notifier);
+#if defined(CONFIG_SFC_SRIOV)
+ if (!efx->type->is_vf)
+ efx_ef100_pci_sriov_disable(efx);
+#endif
+
+ ef100_unregister_netdev(efx);
+
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+ efx_fini_channels(efx);
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+
+ free_netdev(efx->net_dev);
+ efx->net_dev = NULL;
+ efx->state = STATE_PROBED;
+}
+
+int ef100_probe_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+ struct efx_probe_data **probe_ptr;
+ struct net_device *net_dev;
+ int rc;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
+ pci_info(efx->pci_dev, "No network port on this PCI function");
+ return 0;
+ }
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
+ SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
+
+ net_dev->features |= efx->type->offload_features;
+ net_dev->hw_features |= efx->type->offload_features;
+ net_dev->hw_enc_features |= efx->type->offload_features;
+ net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ netif_set_tso_max_segs(net_dev,
+ ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+ efx->mdio.dev = net_dev;
+
+ rc = efx_ef100_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail;
+
+ rc = ef100_phy_probe(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ down_write(&efx->filter_sem);
+ rc = ef100_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc)
+ goto fail;
+
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+
+ /* Don't fail init if RSS setup doesn't work. */
+ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+
+ rc = ef100_register_netdev(efx);
+ if (rc)
+ goto fail;
+
+ if (!efx->type->is_vf) {
+ rc = ef100_probe_netdev_pf(efx);
+ if (rc)
+ goto fail;
+ }
+
+ efx->netdev_notifier.notifier_call = ef100_netdev_event;
+ rc = register_netdevice_notifier(&efx->netdev_notifier);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to register netdevice notifier, rc=%d\n", rc);
+ goto fail;
+ }
+
+fail:
+ return rc;
+}
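
/* With this rework the netdev's private area holds only a pointer back to
 * the separately allocated probe data; that is why alloc_etherdev_mq()
 * above is passed sizeof(probe_data), a pointer size, rather than
 * sizeof(*probe_data).  A sketch of the efx_netdev_priv() accessor the
 * converted callers assume:
 */
static inline struct efx_nic *efx_netdev_priv(struct net_device *dev)
{
	struct efx_probe_data **probe_ptr = netdev_priv(dev);
	struct efx_probe_data *probe_data = *probe_ptr;

	return &probe_data->efx;
}
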
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.h b/drivers/net/ethernet/sfc/ef100_netdev.h
index d40abb7cc086..38b032ba0953 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.h
+++ b/drivers/net/ethernet/sfc/ef100_netdev.h
@@ -13,5 +13,5 @@
int ef100_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
-int ef100_register_netdev(struct efx_nic *efx);
-void ef100_unregister_netdev(struct efx_nic *efx);
+int ef100_probe_netdev(struct efx_probe_data *probe_data);
+void ef100_remove_netdev(struct efx_probe_data *probe_data);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index b2536d2c218a..f89e695cf8ac 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -148,7 +148,7 @@ static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
-static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
+int efx_ef100_init_datapath_caps(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
struct ef100_nic_data *nic_data = efx->nic_data;
@@ -327,7 +327,7 @@ static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ef100_phy_probe(struct efx_nic *efx)
+int ef100_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
int rc;
@@ -365,7 +365,7 @@ static int ef100_phy_probe(struct efx_nic *efx)
return 0;
}
-static int ef100_filter_table_probe(struct efx_nic *efx)
+int ef100_filter_table_probe(struct efx_nic *efx)
{
return efx_mcdi_filter_table_probe(efx, true);
}
@@ -704,178 +704,6 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
-/* NIC level access functions
- */
-#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
- NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
- NETIF_F_HW_VLAN_CTAG_TX)
-
-const struct efx_nic_type ef100_pf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = false,
- .probe = ef100_probe_pf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
-
- .check_caps = ef100_check_caps,
-
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
-#endif
-
- .get_phys_port_id = efx_ef100_get_phys_port_id,
-
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
- .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
- .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
-
- .reconfigure_mac = ef100_reconfigure_mac,
- .reconfigure_port = efx_mcdi_port_reconfigure,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
-#ifdef CONFIG_SFC_SRIOV
- .sriov_configure = efx_ef100_sriov_configure,
-#endif
-
- /* Per-type bar/size configuration not used on ef100. Location of
- * registers is defined by extended capabilities.
- */
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
-const struct efx_nic_type ef100_vf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = true,
- .probe = ef100_probe_vf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
- .check_caps = ef100_check_caps,
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
-#endif
-
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
- .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
-
- .reconfigure_mac = ef100_reconfigure_mac,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
-
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
static int compare_versions(const char *a, const char *b)
{
int a_major, a_minor, a_point, a_patch;
@@ -1077,8 +905,7 @@ static int ef100_check_design_params(struct efx_nic *efx)
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
- netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n",
- total_len);
+ pci_dbg(efx->pci_dev, "%u bytes of design parameters\n", total_len);
while (offset < total_len) {
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
@@ -1117,7 +944,6 @@ out:
static int ef100_probe_main(struct efx_nic *efx)
{
unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
- struct net_device *net_dev = efx->net_dev;
struct ef100_nic_data *nic_data;
char fw_version[32];
int i, rc;
@@ -1130,24 +956,18 @@ static int ef100_probe_main(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
nic_data->efx = efx;
- net_dev->features |= efx->type->offload_features;
- net_dev->hw_features |= efx->type->offload_features;
- net_dev->hw_enc_features |= efx->type->offload_features;
- net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ efx->max_vis = EF100_MAX_VIS;
/* Populate design-parameter defaults */
nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
- netif_set_tso_max_segs(net_dev,
- ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+
/* Read design parameters */
rc = ef100_check_design_params(efx);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unsupported design parameters\n");
+ pci_err(efx->pci_dev, "Unsupported design parameters\n");
goto fail;
}
@@ -1184,12 +1004,6 @@ static int ef100_probe_main(struct efx_nic *efx)
/* Post-IO section. */
rc = efx_mcdi_init(efx);
- if (!rc && efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
- netif_info(efx, probe, efx->net_dev,
- "No network port on this PCI function");
- rc = -ENODEV;
- }
if (rc)
goto fail;
/* Reset (most) configuration for this function */
@@ -1205,67 +1019,37 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
- rc = efx_ef100_init_datapath_caps(efx);
- if (rc < 0)
- goto fail;
-
- efx->max_vis = EF100_MAX_VIS;
-
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
goto fail;
efx->port_num = rc;
efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
- netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);
+ pci_dbg(efx->pci_dev, "Firmware version %s\n", fw_version);
if (compare_versions(fw_version, "1.1.0.1000") < 0) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
+ pci_info(efx->pci_dev, "Firmware uses old event descriptors\n");
rc = -EINVAL;
goto fail;
}
if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
+ pci_info(efx->pci_dev, "Firmware uses unsolicited-event credits\n");
rc = -EINVAL;
goto fail;
}
- rc = ef100_phy_probe(efx);
- if (rc)
- goto fail;
-
- down_write(&efx->filter_sem);
- rc = ef100_filter_table_probe(efx);
- up_write(&efx->filter_sem);
- if (rc)
- goto fail;
-
- netdev_rss_key_fill(efx->rss_context.rx_hash_key,
- sizeof(efx->rss_context.rx_hash_key));
-
- /* Don't fail init if RSS setup doesn't work. */
- efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
-
- rc = ef100_register_netdev(efx);
- if (rc)
- goto fail;
-
return 0;
fail:
return rc;
}
-int ef100_probe_pf(struct efx_nic *efx)
+int ef100_probe_netdev_pf(struct efx_nic *efx)
{
+ struct ef100_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
- struct ef100_nic_data *nic_data;
- int rc = ef100_probe_main(efx);
-
- if (rc)
- goto fail;
+ int rc;
- nic_data = efx->nic_data;
rc = ef100_get_mac_address(efx, net_dev->perm_addr);
if (rc)
goto fail;
@@ -1288,14 +1072,6 @@ void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
- ef100_unregister_netdev(efx);
-
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
- efx_fini_channels(efx);
- kfree(efx->phy_data);
- efx->phy_data = NULL;
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
if (nic_data)
@@ -1303,3 +1079,175 @@ void ef100_remove(struct efx_nic *efx)
kfree(nic_data);
efx->nic_data = NULL;
}
+
+/* NIC level access functions
+ */
+#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
+ NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
+ NETIF_F_HW_VLAN_CTAG_TX)
+
+const struct efx_nic_type ef100_pf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = false,
+ .probe = ef100_probe_main,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+
+ .check_caps = ef100_check_caps,
+
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .get_phys_port_id = efx_ef100_get_phys_port_id,
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef100_sriov_configure,
+#endif
+
+ /* Per-type bar/size configuration not used on ef100. Location of
+ * registers is defined by extended capabilities.
+ */
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
+const struct efx_nic_type ef100_vf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = true,
+ .probe = ef100_probe_vf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+ .check_caps = ef100_check_caps,
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index e799688d5264..744dbbdb4adc 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -8,6 +8,8 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
+#ifndef EFX_EF100_NIC_H
+#define EFX_EF100_NIC_H
#include "net_driver.h"
#include "nic_common.h"
@@ -15,7 +17,7 @@
extern const struct efx_nic_type ef100_pf_nic_type;
extern const struct efx_nic_type ef100_vf_nic_type;
-int ef100_probe_pf(struct efx_nic *efx);
+int ef100_probe_netdev_pf(struct efx_nic *efx);
int ef100_probe_vf(struct efx_nic *efx);
void ef100_remove(struct efx_nic *efx);
@@ -78,3 +80,9 @@ struct ef100_nic_data {
#define efx_ef100_has_cap(caps, flag) \
(!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN)))
+
+int efx_ef100_init_datapath_caps(struct efx_nic *efx);
+int ef100_phy_probe(struct efx_nic *efx);
+int ef100_filter_table_probe(struct efx_nic *efx);
+
+#endif /* EFX_EF100_NIC_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 5a772354da83..153d68e29b8b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -106,14 +106,6 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
- } while (0)
-
/**************************************************************************
*
* Port handling
@@ -378,6 +370,8 @@ static int efx_probe_all(struct efx_nic *efx)
if (rc)
goto fail5;
+ efx->state = STATE_NET_DOWN;
+
return 0;
fail5:
@@ -498,7 +492,7 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
*/
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
@@ -523,7 +517,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
@@ -544,6 +538,9 @@ int efx_net_open(struct net_device *net_dev)
efx_start_all(efx);
if (efx->state == STATE_DISABLED || efx->reset_pending)
netif_device_detach(efx->net_dev);
+ else
+ efx->state = STATE_NET_UP;
+
efx_selftest_async_start(efx);
return 0;
}
@@ -554,7 +551,7 @@ int efx_net_open(struct net_device *net_dev)
*/
int efx_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
@@ -567,7 +564,7 @@ int efx_net_stop(struct net_device *net_dev)
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_add_vid)
return efx->type->vlan_rx_add_vid(efx, proto, vid);
@@ -577,7 +574,7 @@ static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid
static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_kill_vid)
return efx->type->vlan_rx_kill_vid(efx, proto, vid);
@@ -646,7 +643,7 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -659,7 +656,7 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
@@ -681,7 +678,7 @@ static int efx_netdev_event(struct notifier_block *this,
if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
- efx_update_name(netdev_priv(net_dev));
+ efx_update_name(efx_netdev_priv(net_dev));
return NOTIFY_DONE;
}
@@ -720,8 +717,6 @@ static int efx_register_netdev(struct efx_nic *efx)
* already requested. If so, the NIC is probably hosed so we
* abort.
*/
- efx->state = STATE_READY;
- smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
rc = -EIO;
@@ -748,6 +743,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_associate(efx);
+ efx->state = STATE_NET_DOWN;
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -777,7 +774,8 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (!efx->net_dev)
return;
- BUG_ON(netdev_priv(efx->net_dev) != efx);
+ if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
+ return;
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -845,7 +843,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
/* Flush reset_work. It can no longer be scheduled since we
* are not READY.
*/
- BUG_ON(efx->state == STATE_READY);
+ WARN_ON(efx_net_active(efx->state));
efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
@@ -863,6 +861,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
@@ -887,10 +886,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+ pci_dbg(efx->pci_dev, "shutdown successful\n");
efx_fini_struct(efx);
free_netdev(efx->net_dev);
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ kfree(probe_data);
pci_disable_pcie_error_reporting(pci_dev);
};
@@ -1044,24 +1045,34 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
static int efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
+ struct efx_probe_data *probe_data, **probe_ptr;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
- EFX_MAX_RX_QUEUES);
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
+ return -ENOMEM;
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
if (!net_dev)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
efx->type = (const struct efx_nic_type *) entry->driver_data;
efx->fixed_features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
+ efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
@@ -1150,13 +1161,13 @@ static int efx_pm_freeze(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
- efx->state = STATE_UNINIT;
-
+ if (efx_net_active(efx->state)) {
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_disable_interrupts(efx);
+
+ efx->state = efx_freeze(efx->state);
}
rtnl_unlock();
@@ -1171,7 +1182,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
+ if (efx_frozen(efx->state)) {
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
@@ -1184,7 +1195,7 @@ static int efx_pm_thaw(struct device *dev)
efx_device_attach_if_not_resetting(efx);
- efx->state = STATE_READY;
+ efx->state = efx_thaw(efx->state);
efx->type->resume_wol(efx);
}
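
/* Sketch of the reworked state machine these hunks rely on, assuming the
 * net_driver.h layout in which RECOVERY/FROZEN are flag bits OR'd onto a
 * base state instead of being standalone states:
 */
enum efx_state_sketch {
	STATE_UNINIT = 0,	/* struct allocated, hardware not probed */
	STATE_PROBED,		/* hardware probed, no netdev registered */
	STATE_NET_DOWN,		/* netdev registered, interface down */
	STATE_NET_UP,		/* interface up, datapath running */
	STATE_DISABLED,		/* disabled after unrecoverable errors */

	STATE_RECOVERY = 0x100,	/* flag: recovering from a PCI error */
	STATE_FROZEN = 0x200,	/* flag: frozen by power management */
};

static inline bool efx_net_active(unsigned int state)
{
	return state == STATE_NET_DOWN || state == STATE_NET_UP;
}

static inline bool efx_frozen(unsigned int state)
{
	return state & STATE_FROZEN;
}

static inline bool efx_recovering(unsigned int state)
{
	return state & STATE_RECOVERY;
}

/* freeze/thaw and recover/recovered set or clear the flag bit, so the
 * base NET_DOWN/NET_UP state survives a round trip.
 */
static inline unsigned int efx_freeze(unsigned int state)
{
	return state | STATE_FROZEN;
}

static inline unsigned int efx_thaw(unsigned int state)
{
	return state & ~STATE_FROZEN;
}

static inline unsigned int efx_recover(unsigned int state)
{
	return state | STATE_RECOVERY;
}

static inline unsigned int efx_recovered(unsigned int state)
{
	return state & ~STATE_RECOVERY;
}
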
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index f6577e74d6e6..56eb717bb07a 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -167,7 +167,7 @@ static void efx_mac_work(struct work_struct *data)
int efx_set_mac_address(struct net_device *net_dev, void *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
u8 old_addr[6];
@@ -202,7 +202,7 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -211,7 +211,7 @@ void efx_set_rx_mode(struct net_device *net_dev)
int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* If disabling RX n-tuple filtering, clear existing filters */
@@ -285,7 +285,7 @@ unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
/* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx_check_disabled(efx);
@@ -600,7 +600,7 @@ void efx_stop_all(struct efx_nic *efx)
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx_nic_update_stats_atomic(efx, NULL, stats);
@@ -723,7 +723,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
/* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_err(efx, tx_err, efx->net_dev,
"TX stuck with port_enabled=%d: resetting channels\n",
@@ -898,7 +898,7 @@ static void efx_reset_work(struct work_struct *data)
* have changed by now. Now that we have the RTNL lock,
* it cannot change again.
*/
- if (efx->state == STATE_READY)
+ if (efx_net_active(efx->state))
(void)efx_reset(efx, method);
rtnl_unlock();
@@ -908,7 +908,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
- if (efx->state == STATE_RECOVERY) {
+ if (efx_recovering(efx->state)) {
netif_dbg(efx, drv, efx->net_dev,
"recovering: skip scheduling %s reset\n",
RESET_TYPE(type));
@@ -943,7 +943,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (READ_ONCE(efx->state) != STATE_READY)
+ if (!efx_net_active(READ_ONCE(efx->state)))
return;
/* efx_process_channel() will no longer read events once a
@@ -978,8 +978,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx) {}
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
{
int rc = -ENOMEM;
@@ -998,7 +997,6 @@ int efx_init_struct(struct efx_nic *efx,
efx->state = STATE_UNINIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
- efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
efx->rx_ip_align =
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
@@ -1023,7 +1021,6 @@ int efx_init_struct(struct efx_nic *efx,
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
- efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
@@ -1077,13 +1074,11 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
int rc;
efx->mem_bar = UINT_MAX;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
+ pci_dbg(pci_dev, "initialising I/O bar=%d\n", bar);
rc = pci_enable_device(pci_dev);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
+ pci_err(pci_dev, "failed to enable PCI device\n");
goto fail1;
}
@@ -1091,42 +1086,40 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
+ pci_err(efx->pci_dev, "could not find a suitable DMA mask\n");
goto fail2;
}
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long)dma_mask);
+ pci_dbg(efx->pci_dev, "using DMA mask %llx\n", (unsigned long long)dma_mask);
efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
if (!efx->membase_phys) {
- netif_err(efx, probe, efx->net_dev,
- "ERROR: No BAR%d mapping from the BIOS. "
- "Try pci=realloc on the kernel command line\n", bar);
+ pci_err(efx->pci_dev,
+ "ERROR: No BAR%d mapping from the BIOS. Try pci=realloc on the kernel command line\n",
+ bar);
rc = -ENODEV;
goto fail3;
}
rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR[%d] failed\n", bar);
+ pci_err(efx->pci_dev,
+ "request for memory BAR[%d] failed\n", bar);
rc = -EIO;
goto fail3;
}
efx->mem_bar = bar;
efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR[%d] at %llx+%x\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size);
+ pci_err(efx->pci_dev,
+ "could not map memory BAR[%d] at %llx+%x\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
+ pci_dbg(efx->pci_dev,
+ "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1142,7 +1135,7 @@ fail1:
void efx_fini_io(struct efx_nic *efx)
{
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+ pci_dbg(efx->pci_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
@@ -1217,13 +1210,15 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx->state = STATE_RECOVERY;
+ efx->state = efx_recover(efx->state);
efx->reset_pending = 0;
efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
+ if (efx_net_active(efx->state)) {
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+ }
status = PCI_ERS_RESULT_NEED_RESET;
} else {
@@ -1271,7 +1266,7 @@ static void efx_io_resume(struct pci_dev *pdev)
netif_err(efx, hw, efx->net_dev,
"efx_reset failed after PCI error (%d)\n", rc);
} else {
- efx->state = STATE_READY;
+ efx->state = efx_recovered(efx->state);
netif_dbg(efx, hw, efx->net_dev,
"Done resetting and resuming IO after PCI error.\n");
}
@@ -1357,7 +1352,7 @@ static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (skb->encapsulation) {
if (features & NETIF_F_GSO_MASK)
@@ -1378,7 +1373,7 @@ netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_phys_port_id)
return efx->type->get_phys_port_id(efx, ppid);
@@ -1388,7 +1383,7 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (snprintf(name, len, "p%u", efx->port_num) >= len)
return -EINVAL;
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 65513fd0cf6c..93babc1a2678 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -14,8 +14,7 @@
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
unsigned int mem_map_size);
void efx_fini_io(struct efx_nic *efx);
-int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
- struct net_device *net_dev);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev);
void efx_fini_struct(struct efx_nic *efx);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -43,12 +42,11 @@ void efx_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);
int efx_reconfigure_port(struct efx_nic *efx);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx)->state != STATE_UNINIT && \
+ (efx)->state != STATE_PROBED) \
+ ASSERT_RTNL(); \
} while (0)
int efx_try_recovery(struct efx_nic *efx);
@@ -64,7 +62,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx);
static inline int efx_check_disabled(struct efx_nic *efx)
{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 48506373721a..364323599f7b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -33,7 +33,7 @@
static int efx_ethtool_phys_id(struct net_device *net_dev,
enum ethtool_phys_id_state state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
enum efx_led_mode mode = EFX_LED_DEFAULT;
switch (state) {
@@ -55,13 +55,13 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
- return efx_nic_get_regs_len(netdev_priv(net_dev));
+ return efx_nic_get_regs_len(efx_netdev_priv(net_dev));
}
static void efx_ethtool_get_regs(struct net_device *net_dev,
struct ethtool_regs *regs, void *buf)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
regs->version = efx->type->revision;
efx_nic_get_regs(efx, buf);
@@ -101,7 +101,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int tx_usecs, rx_usecs;
bool rx_adaptive;
@@ -121,7 +121,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_channel *channel;
unsigned int tx_usecs, rx_usecs;
bool adaptive, rx_may_override_tx;
@@ -163,7 +163,7 @@ efx_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
@@ -177,7 +177,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
@@ -204,7 +204,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->get_wol(efx, wol);
}
@@ -212,14 +212,14 @@ static void efx_ethtool_get_wol(struct net_device *net_dev,
static int efx_ethtool_set_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->set_wol(efx, wol->wolopts);
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
struct ethtool_fec_stats *fec_stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_fec_stats)
efx->type->get_fec_stats(efx, fec_stats);
@@ -228,7 +228,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
struct ethtool_ts_info *ts_info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Software capabilities */
ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bd552c7dffcb..58ad9d665805 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -103,7 +103,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_mcdi_print_fwver(efx, info->fw_version,
@@ -113,14 +113,14 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->msg_enable;
}
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
efx->msg_enable = msg_enable;
}
@@ -128,7 +128,7 @@ void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
bool already_up;
int rc = -ENOMEM;
@@ -137,7 +137,7 @@ void efx_ethtool_self_test(struct net_device *net_dev,
if (!efx_tests)
goto fail;
- if (efx->state != STATE_READY) {
+ if (!efx_net_active(efx->state)) {
rc = -EBUSY;
goto out;
}
@@ -176,7 +176,7 @@ fail:
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
@@ -186,7 +186,7 @@ void efx_ethtool_get_pauseparam(struct net_device *net_dev,
int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
int rc = 0;
@@ -441,7 +441,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
switch (string_set) {
case ETH_SS_STATS:
@@ -459,7 +459,7 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int i;
switch (string_set) {
@@ -487,7 +487,7 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
@@ -561,7 +561,7 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
mutex_lock(&efx->mac_lock);
@@ -584,7 +584,7 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
@@ -604,7 +604,7 @@ int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
int efx_ethtool_get_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -617,7 +617,7 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev,
int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -809,7 +809,7 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 rss_context = 0;
s32 rc = 0;
@@ -1127,7 +1127,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx_filter_get_rx_id_limit(efx) == 0)
return -EOPNOTSUPP;
@@ -1148,7 +1148,7 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev,
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->n_rx_channels == 1)
return 0;
@@ -1157,7 +1157,7 @@ u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->rx_hash_key_size;
}
@@ -1165,7 +1165,7 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->rx_pull_rss_config(efx);
@@ -1186,7 +1186,7 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
@@ -1205,7 +1205,7 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
int rc = 0;
@@ -1238,7 +1238,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
const u8 hfunc, u32 *rss_context,
bool delete)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
bool allocated = false;
int rc;
@@ -1300,7 +1300,7 @@ out_unlock:
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->map_reset_flags(flags);
@@ -1314,7 +1314,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
@@ -1327,7 +1327,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
int efx_ethtool_get_module_info(struct net_device *net_dev,
struct ethtool_modinfo *modinfo)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/falcon/bitfield.h b/drivers/net/ethernet/sfc/falcon/bitfield.h
index 5eb178d0c149..78537a53009e 100644
--- a/drivers/net/ethernet/sfc/falcon/bitfield.h
+++ b/drivers/net/ethernet/sfc/falcon/bitfield.h
@@ -117,7 +117,7 @@ typedef union ef4_oword {
*
* ( element ) << 4
*
- * The result will contain the relevant bits filled in in the range
+ * The result will contain the relevant bits filled in the range
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EF4_EXTRACT_NATIVE(native_element, min, max, low, high) \
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 2c91792cec01..c64623c2e80c 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -2711,7 +2711,7 @@ void ef4_farch_filter_table_remove(struct ef4_nic *efx)
enum ef4_farch_filter_table_id table_id;
for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2740,9 +2740,7 @@ int ef4_farch_filter_table_probe(struct ef4_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index a3425b6be3f7..3225fe64c397 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -99,14 +99,12 @@ int efx_mcdi_init(struct efx_nic *efx)
*/
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
+ pci_err(efx->pci_dev, "Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
+ pci_err(efx->pci_dev, "Host already registered with MCPU\n");
if (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
@@ -1447,7 +1445,7 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
return;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
buf[0] = 0;
}
@@ -1471,8 +1469,9 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* care what firmware we get.
*/
if (rc == -EPERM) {
- netif_dbg(efx, probe, efx->net_dev,
- "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ pci_dbg(efx->pci_dev,
+ "%s with fw-variant setting failed EPERM, trying without it\n",
+ __func__);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
MC_CMD_FW_DONT_CARE);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
@@ -1514,7 +1513,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
return 0;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index ff617b1b38d3..7984f6f84a3c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -274,7 +274,7 @@
* MC_CMD_WORKAROUND_BUG26807.
 * May also be returned for other operations such as sub-variant switching. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* The clock whose frequency you've attempted to set set
+/* The clock whose frequency you've attempted to set
* doesn't exist on this NIC */
#define MC_CMD_ERR_NO_CLOCK 0x1015
/* Returned by MC_CMD_TESTASSERT if the action that should
@@ -7782,7 +7782,7 @@
* large number (253) it is not anticipated that this will be needed in the
* near future, so can currently be ignored.
*
- * On Riverhead this command is implemented as a a wrapper for `list` in the
+ * On Riverhead this command is implemented as a wrapper for `list` in the
* sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
@@ -7827,7 +7827,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for
+ * On Riverhead this command is implemented as a wrapper for
* `get_descriptions` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
@@ -7876,7 +7876,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for `get_readings`
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
* in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
@@ -19322,7 +19322,7 @@
* TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
* contains all modes implemented in firmware for a particular board. Modes
* listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
* should be considered hidden (not to be exposed in userland tools) and for
* engineering use only. There are no other semantic differences and any mode
* listed in either MODES or ENGINEERING_MODES can be set on the board.
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 94c6a345c0b1..ad4694fa3dda 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -20,7 +20,7 @@
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
@@ -46,7 +46,7 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
static int efx_mcdi_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 723bbeea5d0c..2228c88a7f31 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -622,12 +622,55 @@ enum efx_int_mode {
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
enum nic_state {
- STATE_UNINIT = 0, /* device being probed/removed or is frozen */
- STATE_READY = 1, /* hardware ready and netdev registered */
- STATE_DISABLED = 2, /* device disabled due to hardware errors */
- STATE_RECOVERY = 3, /* device recovering from PCI error */
+ STATE_UNINIT = 0, /* device being probed/removed */
+ STATE_PROBED, /* hardware probed */
+ STATE_NET_DOWN, /* netdev registered */
+ STATE_NET_UP, /* ready for traffic */
+ STATE_DISABLED, /* device disabled due to hardware errors */
+
+ STATE_RECOVERY = 0x100, /* recovering from PCI error */
+ STATE_FROZEN = 0x200, /* frozen by power management */
};
+static inline bool efx_net_active(enum nic_state state)
+{
+ return state == STATE_NET_DOWN || state == STATE_NET_UP;
+}
+
+static inline bool efx_frozen(enum nic_state state)
+{
+ return state & STATE_FROZEN;
+}
+
+static inline bool efx_recovering(enum nic_state state)
+{
+ return state & STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_freeze(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_FROZEN;
+}
+
+static inline enum nic_state efx_thaw(enum nic_state state)
+{
+ WARN_ON(!efx_frozen(state));
+ return state & ~STATE_FROZEN;
+}
+
+static inline enum nic_state efx_recover(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_recovered(enum nic_state state)
+{
+ WARN_ON(!efx_recovering(state));
+ return state & ~STATE_RECOVERY;
+}
+
/* Forward declaration */
struct efx_nic;
@@ -1123,6 +1166,24 @@ struct efx_nic {
atomic_t n_rx_noskb_drops;
};
+/**
+ * struct efx_probe_data - State after hardware probe
+ * @pci_dev: The PCI device
+ * @efx: Efx NIC details
+ */
+struct efx_probe_data {
+ struct pci_dev *pci_dev;
+ struct efx_nic efx;
+};
+
+static inline struct efx_nic *efx_netdev_priv(struct net_device *dev)
+{
+ struct efx_probe_data **probe_ptr = netdev_priv(dev);
+ struct efx_probe_data *probe_data = *probe_ptr;
+
+ return &probe_data->efx;
+}
+
static inline int efx_dev_registered(struct efx_nic *efx)
{
return efx->net_dev->reg_state == NETREG_REGISTERED;
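Two things in the net_driver.h hunk deserve spelling out. First,
STATE_RECOVERY and STATE_FROZEN are now flag bits OR'd onto a base state
rather than mutually exclusive values, which is what the freeze/thaw and
recover/recovered helpers rely on. Second, efx_netdev_priv() adds a level
of indirection because the netdev's private area now holds only a pointer
to the separately allocated efx_probe_data. A minimal sketch of the flag
composition, using only the definitions above:

	enum nic_state s = STATE_NET_UP;

	s = efx_freeze(s);	/* s == (STATE_NET_UP | STATE_FROZEN) */
	/* efx_net_active(s) is false while frozen, so callers test one
	 * predicate instead of enumerating legal state combinations. */
	s = efx_thaw(s);	/* back to plain STATE_NET_UP */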
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fa8b9aacca11..bd21d6ac778a 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -857,7 +857,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
{
struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
work);
- struct efx_nic *efx = netdev_priv(req->net_dev);
+ struct efx_nic *efx = efx_netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
int slot_idx = req - efx->rps_slot;
struct efx_arfs_rule *rule;
@@ -942,7 +942,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
struct efx_arfs_rule *rule;
struct flow_keys fk;
diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c
index cce23803c652..89ccd65c978b 100644
--- a/drivers/net/ethernet/sfc/siena/farch.c
+++ b/drivers/net/ethernet/sfc/siena/farch.c
@@ -2778,7 +2778,7 @@ void efx_farch_filter_table_remove(struct efx_nic *efx)
enum efx_farch_filter_table_id table_id;
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2822,9 +2822,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
index 89a7fd47b057..a3cc8b7ec732 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -274,7 +274,7 @@
* MC_CMD_WORKAROUND_BUG26807.
 * May also be returned for other operations such as sub-variant switching. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* The clock whose frequency you've attempted to set set
+/* The clock whose frequency you've attempted to set
* doesn't exist on this NIC */
#define MC_CMD_ERR_NO_CLOCK 0x1015
/* Returned by MC_CMD_TESTASSERT if the action that should
@@ -7782,7 +7782,7 @@
* large number (253) it is not anticipated that this will be needed in the
* near future, so can currently be ignored.
*
- * On Riverhead this command is implemented as a a wrapper for `list` in the
+ * On Riverhead this command is implemented as a wrapper for `list` in the
* sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
@@ -7827,7 +7827,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for
+ * On Riverhead this command is implemented as a wrapper for
* `get_descriptions` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
@@ -7876,7 +7876,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for `get_readings`
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
* in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
@@ -16682,7 +16682,7 @@
* TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
* contains all modes implemented in firmware for a particular board. Modes
* listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
* should be considered hidden (not to be exposed in userland tools) and for
* engineering use only. There are no other semantic differences and any mode
* listed in either MODES or ENGINEERING_MODES can be set on the board.
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 3f241e6c881a..fc9f0189f285 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -10,7 +10,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_mac)
return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
@@ -21,7 +21,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
u8 qos, __be16 vlan_proto)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_vlan) {
if ((vlan & ~VLAN_VID_MASK) ||
@@ -40,7 +40,7 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_spoofchk)
return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
@@ -51,7 +51,7 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_get_vf_config)
return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
@@ -62,7 +62,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
int link_state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_link_state)
return efx->type->sriov_set_vf_link_state(efx, vf_i,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 138bca611341..79cc0bb76321 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -512,7 +512,7 @@ unlock:
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
unsigned index, type;
@@ -609,7 +609,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct tc_mqprio_qopt *mqprio = type_data;
unsigned tc, num_tc;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index a0654e88444c..0329caf63279 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1515,14 +1515,14 @@ static void epic_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
+ unregister_netdev(dev);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
ep->tx_ring_dma);
dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
ep->rx_ring_dma);
- unregister_netdev(dev);
pci_iounmap(pdev, ep->ioaddr);
- pci_release_regions(pdev);
free_netdev(dev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
/* pci_power_off(pdev, -1); */
}
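The epic100 reordering follows the usual rule that remove() must undo
probe() in reverse: unregister the netdev first so no RX/TX path can touch
the DMA rings while they are freed, and release the PCI regions only once
nothing maps them any more. The resulting order, annotated:

	unregister_netdev(dev);		/* stop all traffic first      */
	dma_free_coherent(...);		/* rings no longer reachable   */
	pci_iounmap(pdev, ep->ioaddr);
	free_netdev(dev);
	pci_release_regions(pdev);	/* no user of the BARs remains */
	pci_disable_device(pdev);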
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a57b0fa815ab..ea4910ae0921 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -197,7 +197,7 @@ static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
MMC_CNTRL, value);
}
-/* To mask all all interrupts.*/
+/* To mask all interrupts.*/
static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fe263cad8248..6f14b00c0b14 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3961,7 +3961,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
hdr = sizeof(struct udphdr);
} else {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
hdr = tcp_hdrlen(skb);
}
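skb_tcp_all_headers() is a named form of the open-coded sum it replaces
here and in several other drivers in this series. Assuming it matches its
introduction in <net/tcp.h> (quoted from memory, so treat as a sketch):

	static inline int skb_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}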
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 435dc00d04e5..0b08b0e085e8 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -29,7 +29,7 @@
* -- on page reclamation, the driver swaps the page with a spare page.
* if that page is still in use, it frees its reference to that page,
* and allocates a new page for use. otherwise, it just recycles the
- * the page.
+ * page.
*
* NOTE: cassini can parse the header. however, it's not worth it
* as long as the network stack requires a header copy.
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index ae5f05f03f88..2d91f4936d52 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -764,7 +764,7 @@
* PAUSE thresholds defined in terms of FIFO occupancy and may be translated
* into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
* when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
- * value is is 0x6F.
+ * value is 0x6F.
* DEFAULT: 0x00078
*/
#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 6b59b14e74b1..0cd8493b810f 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -335,7 +335,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
port->tsolen = 0;
/* Mark the port as belonging to ldmvsw which directs the
- * the common code to use the net_device in the vnet_port
+ * common code to use the net_device in the vnet_port
* rather than the net_device in the vnet (which is used
* by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE
* macro.
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 45bd89153de2..a14591b41acb 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1088,7 +1088,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
/* netif_stop_queue() must be done before checking
- * checking tx index in TX_BUFFS_AVAIL() below, because
+ * tx index in TX_BUFFS_AVAIL() below, because
* in gem_tx(), we update tx_old before checking for
* netif_queue_stopped().
*/
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 3773ce5e12cc..546206640492 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -494,7 +494,7 @@ static int spl2sw_probe(struct platform_device *pdev)
/* Add and enable napi. */
netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT);
napi_enable(&comm->rx_napi);
- netif_napi_add(ndev, &comm->tx_napi, spl2sw_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
napi_enable(&comm->tx_napi);
return 0;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index d435519236e4..e54ce73396ee 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -81,7 +81,7 @@ static int xlgmac_prep_tso(struct sk_buff *skb,
if (ret)
return ret;
- pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->header_len = skb_tcp_all_headers(skb);
pkt_info->tcp_header_len = tcp_hdrlen(skb);
pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
pkt_info->mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
new file mode 100644
index 000000000000..baa1f0a5cc37
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wangxun network device configuration
+#
+
+config NET_VENDOR_WANGXUN
+ bool "Wangxun devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Wangxun cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_WANGXUN
+
+config TXGBE
+ tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ depends on PCI
+ help
+ This driver supports Wangxun(R) 10GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called txgbe.
+
+endif # NET_VENDOR_WANGXUN
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
new file mode 100644
index 000000000000..c34db1bead25
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Wangxun network device drivers.
+#
+
+obj-$(CONFIG_TXGBE) += txgbe/
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
new file mode 100644
index 000000000000..431303ca75b4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_TXGBE) += txgbe.o
+
+txgbe-objs := txgbe_main.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
new file mode 100644
index 000000000000..38ddbde0ed0f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_H_
+#define _TXGBE_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_MAX_FDIR_INDICES 63
+
+#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+
+/* board specific private data structure */
+struct txgbe_adapter {
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+};
+
+extern char txgbe_driver_name[];
+
+#endif /* _TXGBE_H_ */
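Because txgbe_adapter is carved out of the netdev's private area (see
devm_alloc_etherdev_mqs() in txgbe_main.c below), adapter and netdev
convert both ways with no extra allocation. A sketch, where
txgbe_from_netdev() is a hypothetical helper rather than part of this
patch:

	static struct txgbe_adapter *txgbe_from_netdev(struct net_device *ndev)
	{
		return netdev_priv(ndev);	/* forward mapping */
	}
	/* backward mapping: adapter->netdev, stored during probe */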
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
new file mode 100644
index 000000000000..55c3c720b377
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "txgbe.h"
+
+char txgbe_driver_name[] = "txgbe";
+
+/* txgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbe_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static void txgbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ txgbe_dev_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * txgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * txgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct txgbe_adapter *adapter = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ txgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct txgbe_adapter),
+ TXGBE_MAX_TX_QUEUES,
+ TXGBE_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->io_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, adapter);
+
+ return 0;
+
+err_pci_release_regions:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbe_remove(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver txgbe_driver = {
+ .name = txgbe_driver_name,
+ .id_table = txgbe_pci_tbl,
+ .probe = txgbe_probe,
+ .remove = txgbe_remove,
+ .shutdown = txgbe_shutdown,
+};
+
+module_pci_driver(txgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
new file mode 100644
index 000000000000..b2e329f50bae
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_TYPE_H_
+#define _TXGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+/************ txgbe_register.h ************/
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/* Device IDs */
+#define TXGBE_DEV_ID_SP1000 0x1001
+#define TXGBE_DEV_ID_WX1820 0x2001
+
+/* Subsystem IDs */
+/* SFP */
+#define TXGBE_ID_SP1000_SFP 0x0000
+#define TXGBE_ID_WX1820_SFP 0x2000
+#define TXGBE_ID_SFP 0x00
+
+/* copper */
+#define TXGBE_ID_SP1000_XAUI 0x1010
+#define TXGBE_ID_WX1820_XAUI 0x2010
+#define TXGBE_ID_XAUI 0x10
+#define TXGBE_ID_SP1000_SGMII 0x1020
+#define TXGBE_ID_WX1820_SGMII 0x2020
+#define TXGBE_ID_SGMII 0x20
+/* backplane */
+#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030
+#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030
+#define TXGBE_ID_KR_KX_KX4 0x30
+/* MAC Interface */
+#define TXGBE_ID_SP1000_MAC_XAUI 0x1040
+#define TXGBE_ID_WX1820_MAC_XAUI 0x2040
+#define TXGBE_ID_MAC_XAUI 0x40
+#define TXGBE_ID_SP1000_MAC_SGMII 0x1060
+#define TXGBE_ID_WX1820_MAC_SGMII 0x2060
+#define TXGBE_ID_MAC_SGMII 0x60
+
+#define TXGBE_NCSI_SUP 0x8000
+#define TXGBE_NCSI_MASK 0x8000
+#define TXGBE_WOL_SUP 0x4000
+#define TXGBE_WOL_MASK 0x4000
+#define TXGBE_DEV_MASK 0xf0
+
+/* Combined interface */
+#define TXGBE_ID_SFI_XAUI 0x50
+
+/* Revision ID */
+#define TXGBE_SP_MPW 1
+
+#endif /* _TXGBE_TYPE_H_ */
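The *_SFP/*_XAUI/*_SGMII constants suggest, though the patch does not say
so explicitly, that bits 7:4 of the subsystem device ID encode the media
type, with TXGBE_DEV_MASK isolating them. A hypothetical classifier built
on the masks above:

	static u8 txgbe_media_type(u16 ssid)
	{
		/* e.g. 0x2010 & 0xf0 == 0x10 == TXGBE_ID_XAUI */
		return ssid & TXGBE_DEV_MASK;
	}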
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 48f544f6c999..2772a79cd3ed 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -106,7 +106,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
- * into MWD register. The the MCR register is then appropriately setup
+ * into MWD register. The MCR register is then appropriately setup
* to finish the write operation.
*/
static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 89770c2e0ffb..3591b9edc9a1 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -29,6 +29,7 @@
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
@@ -156,7 +157,7 @@ struct eth_plat_info {
u8 phy; /* MII PHY ID, 0 - 31 */
u8 rxq; /* configurable, currently 0 - 31 only */
u8 txreadyq;
- u8 hwaddr[6];
+ u8 hwaddr[ETH_ALEN];
u8 npe; /* NPE instance used by this interface */
bool has_mdio; /* If this instance has an MDIO bus */
};
@@ -1387,6 +1388,7 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
struct of_phandle_args npe_spec;
struct device_node *mdio_np;
struct eth_plat_info *plat;
+ u8 mac[ETH_ALEN];
int ret;
plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
@@ -1428,6 +1430,12 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
}
plat->txreadyq = queue_spec.args[0];
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(plat->hwaddr, mac, ETH_ALEN);
+ }
+
return plat;
}
@@ -1487,7 +1495,10 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- eth_hw_addr_set(ndev, plat->hwaddr);
+ if (is_valid_ether_addr(plat->hwaddr))
+ eth_hw_addr_set(ndev, plat->hwaddr);
+ else
+ eth_hw_addr_random(ndev);
platform_set_drvdata(pdev, ndev);
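The ixp4xx change is the stock devicetree MAC pattern: use the address
from the DT when one is present, otherwise fall back to a random locally
administered address so the interface still comes up. Condensed sketch of
what the two hunks implement together:

	u8 mac[ETH_ALEN];

	if (!of_get_mac_address(np, mac))	/* returns 0 on success */
		eth_hw_addr_set(ndev, mac);
	else
		eth_hw_addr_random(ndev);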
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 45c3c4a1101b..9fb567524220 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -99,6 +99,7 @@ struct sixpack {
unsigned int rx_count;
unsigned int rx_count_cooked;
+ spinlock_t rxlock;
int mtu; /* Our mtu (to spot changes!) */
int buffsize; /* Max buffers sizes */
@@ -565,6 +566,7 @@ static int sixpack_open(struct tty_struct *tty)
sp->dev = dev;
spin_lock_init(&sp->lock);
+ spin_lock_init(&sp->rxlock);
refcount_set(&sp->refcnt, 1);
init_completion(&sp->dead);
@@ -913,6 +915,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
sp->led_state = 0x60;
/* fill trailing bytes with zeroes */
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
+ spin_lock_bh(&sp->rxlock);
rest = sp->rx_count;
if (rest != 0)
for (i = rest; i <= 3; i++)
@@ -930,6 +933,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
sp_bump(sp, 0);
}
sp->rx_count_cooked = 0;
+ spin_unlock_bh(&sp->rxlock);
}
break;
case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n");
@@ -959,8 +963,11 @@ sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
decode_prio_command(sp, inbyte);
else if ((inbyte & SIXP_STD_CMD_MASK) != 0)
decode_std_command(sp, inbyte);
- else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)
+ else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) {
+ spin_lock_bh(&sp->rxlock);
decode_data(sp, inbyte);
+ spin_unlock_bh(&sp->rxlock);
+ }
}
}
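The new rxlock exists because decode_data() (data bytes) and the command
path in decode_std_command() both update rx_count, rx_count_cooked and the
cooked buffer, and can race against each other. Every writer is now
bracketed the same way; the _bh variant is presumably sufficient because
these paths run from tty receive/BH context rather than hard IRQ:

	spin_lock_bh(&sp->rxlock);
	/* ... update sp->rx_count / sp->rx_count_cooked / cooked buffer ... */
	spin_unlock_bh(&sp->rxlock);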
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index cf646dc8e36a..29496ca15825 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -339,7 +339,7 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
- /* Allocate and initialize non-zero fields in the the transaction */
+ /* Allocate and initialize non-zero fields in the transaction */
trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
trans->gsi = gsi;
trans->channel_id = channel_id;
@@ -669,7 +669,7 @@ int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
if (!gsi_trans_tre_reserve(trans_info, 1))
return -EBUSY;
- /* Now fill the the reserved TRE and tell the hardware */
+ /* Now fill the reserved TRE and tell the hardware */
dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index 22ba7b0b476d..6289b7c765f1 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -6,8 +6,8 @@
menu "PCS device drivers"
config PCS_XPCS
- tristate "Synopsys DesignWare XPCS controller"
- depends on MDIO_DEVICE && MDIO_BUS
+ tristate
+ select PHYLINK
help
This module provides helper functions for Synopsys DesignWare XPCS
controllers.
@@ -18,4 +18,12 @@ config PCS_LYNX
This module provides helpers to phylink for managing the Lynx PCS
which is part of the Layerscape and QorIQ Ethernet SERDES.
+config PCS_RZN1_MIIC
+ tristate "Renesas RZ/N1 MII converter"
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ help
+ This module provides a driver for the MII converter that is available
+ on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
+ pass-through mode for MII.
+
endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index 0603d469bd57..0ff5388fcdea 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -5,3 +5,4 @@ pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
+obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index fd3445374955..7d5fc7f54b2f 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -71,12 +71,10 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
struct phylink_link_state *state)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
int bmsr, lpa;
- bmsr = mdiobus_read(bus, addr, MII_BMSR);
- lpa = mdiobus_read(bus, addr, MII_LPA);
+ bmsr = mdiodev_read(pcs, MII_BMSR);
+ lpa = mdiodev_read(pcs, MII_LPA);
if (bmsr < 0 || lpa < 0) {
state->link = false;
return;
@@ -124,57 +122,39 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
state->link, state->an_enabled, state->an_complete);
}
-static int lynx_pcs_config_1000basex(struct mdio_device *pcs,
- unsigned int mode,
- const unsigned long *advertising)
+static int lynx_pcs_config_giga(struct mdio_device *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u32 link_timer;
- int err;
-
- link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
- mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
- mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
-
- err = mdiobus_modify(bus, addr, IF_MODE,
- IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
- 0);
- if (err)
- return err;
-
- return phylink_mii_c22_pcs_config(pcs, mode,
- PHY_INTERFACE_MODE_1000BASEX,
- advertising);
-}
-
-static int lynx_pcs_config_sgmii(struct mdio_device *pcs, unsigned int mode,
- const unsigned long *advertising)
-{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u16 if_mode;
int err;
- if_mode = IF_MODE_SGMII_EN;
- if (mode == MLO_AN_INBAND) {
- u32 link_timer;
-
- if_mode |= IF_MODE_USE_SGMII_AN;
-
- /* Adjust link timer for SGMII */
- link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
- mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
- mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
+ if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
+ mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+
+ if_mode = 0;
+ } else {
+ if_mode = IF_MODE_SGMII_EN;
+ if (mode == MLO_AN_INBAND) {
+ if_mode |= IF_MODE_USE_SGMII_AN;
+
+ /* Adjust link timer for SGMII */
+ link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
+ mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+ }
}
- err = mdiobus_modify(bus, addr, IF_MODE,
+
+ err = mdiodev_modify(pcs, IF_MODE,
IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
if_mode);
if (err)
return err;
- return phylink_mii_c22_pcs_config(pcs, mode, PHY_INTERFACE_MODE_SGMII,
- advertising);
+ return phylink_mii_c22_pcs_config(pcs, mode, interface, advertising);
}
static int lynx_pcs_config_usxgmii(struct mdio_device *pcs, unsigned int mode,
@@ -204,10 +184,10 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
switch (ifmode) {
case PHY_INTERFACE_MODE_1000BASEX:
- return lynx_pcs_config_1000basex(lynx->mdio, mode, advertising);
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- return lynx_pcs_config_sgmii(lynx->mdio, mode, advertising);
+ return lynx_pcs_config_giga(lynx->mdio, mode, ifmode,
+ advertising);
case PHY_INTERFACE_MODE_2500BASEX:
if (phylink_autoneg_inband(mode)) {
dev_err(&lynx->mdio->dev,
@@ -237,9 +217,7 @@ static void lynx_pcs_an_restart(struct phylink_pcs *pcs)
static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
int speed, int duplex)
{
- struct mii_bus *bus = pcs->bus;
u16 if_mode = 0, sgmii_speed;
- int addr = pcs->addr;
/* The PCS needs to be configured manually only
* when not operating on in-band mode
@@ -269,7 +247,7 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
}
if_mode |= IF_MODE_SPEED(sgmii_speed);
- mdiobus_modify(bus, addr, IF_MODE,
+ mdiodev_modify(pcs, IF_MODE,
IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
if_mode);
}
@@ -294,8 +272,6 @@ static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
unsigned int mode,
int speed, int duplex)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u16 if_mode = 0;
if (mode == MLO_AN_INBAND) {
@@ -307,7 +283,7 @@ static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
if_mode |= IF_MODE_HALF_DUPLEX;
if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500);
- mdiobus_modify(bus, addr, IF_MODE,
+ mdiodev_modify(pcs, IF_MODE,
IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
if_mode);
}
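The lynx conversion can drop the local bus/addr pairs because the
mdiodev_*() accessors carry both inside struct mdio_device. Assuming they
match their <linux/mdio.h> definitions (quoted from memory):

	static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
	{
		return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
	}

	static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
					 u16 mask, u16 set)
	{
		return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum,
				      mask, set);
	}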
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
new file mode 100644
index 000000000000..c1424119e821
--- /dev/null
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mdio.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/phylink.h>
+#include <linux/pm_runtime.h>
+#include <dt-bindings/net/pcs-rzn1-miic.h>
+
+#define MIIC_PRCMD 0x0
+#define MIIC_ESID_CODE 0x4
+
+#define MIIC_MODCTRL 0x20
+#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
+
+#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
+
+#define MIIC_CONVCTRL_CONV_SPEED GENMASK(1, 0)
+#define CONV_MODE_10MBPS 0
+#define CONV_MODE_100MBPS 1
+#define CONV_MODE_1000MBPS 2
+
+#define MIIC_CONVCTRL_CONV_MODE GENMASK(3, 2)
+#define CONV_MODE_MII 0
+#define CONV_MODE_RMII 1
+#define CONV_MODE_RGMII 2
+
+#define MIIC_CONVCTRL_FULLD BIT(8)
+#define MIIC_CONVCTRL_RGMII_LINK BIT(12)
+#define MIIC_CONVCTRL_RGMII_DUPLEX BIT(13)
+#define MIIC_CONVCTRL_RGMII_SPEED GENMASK(15, 14)
+
+#define MIIC_CONVRST 0x114
+#define MIIC_CONVRST_PHYIF_RST(port) BIT(port)
+#define MIIC_CONVRST_PHYIF_RST_MASK GENMASK(4, 0)
+
+#define MIIC_SWCTRL 0x304
+#define MIIC_SWDUPC 0x308
+
+#define MIIC_MAX_NR_PORTS 5
+
+#define MIIC_MODCTRL_CONF_CONV_NUM 6
+#define MIIC_MODCTRL_CONF_NONE -1
+
+/**
+ * struct modctrl_match - Matching table entry for convctrl configuration
+ * See section 8.2.1 of the manual.
+ * @mode_cfg: Configuration value for convctrl
+ * @conv: Configuration of the Ethernet port muxes. Index 0 is SWITCH_PORTIN,
+ * indexes 1 - 5 are CONV1 - CONV5.
+ */
+struct modctrl_match {
+ u32 mode_cfg;
+ u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
+};
+
+static struct modctrl_match modctrl_match_table[] = {
+ {0x0, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x1, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x2, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x3, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+ {0x8, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x9, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0xA, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0xB, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+ {0x10, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x11, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x12, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x13, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}}
+};
+
+static const char * const conf_to_string[] = {
+ [MIIC_GMAC1_PORT] = "GMAC1_PORT",
+ [MIIC_GMAC2_PORT] = "GMAC2_PORT",
+ [MIIC_RTOS_PORT] = "RTOS_PORT",
+ [MIIC_SERCOS_PORTA] = "SERCOS_PORTA",
+ [MIIC_SERCOS_PORTB] = "SERCOS_PORTB",
+ [MIIC_ETHERCAT_PORTA] = "ETHERCAT_PORTA",
+ [MIIC_ETHERCAT_PORTB] = "ETHERCAT_PORTB",
+ [MIIC_ETHERCAT_PORTC] = "ETHERCAT_PORTC",
+ [MIIC_SWITCH_PORTA] = "SWITCH_PORTA",
+ [MIIC_SWITCH_PORTB] = "SWITCH_PORTB",
+ [MIIC_SWITCH_PORTC] = "SWITCH_PORTC",
+ [MIIC_SWITCH_PORTD] = "SWITCH_PORTD",
+ [MIIC_HSR_PORTA] = "HSR_PORTA",
+ [MIIC_HSR_PORTB] = "HSR_PORTB",
+};
+
+static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
+ "SWITCH_PORTIN",
+ "CONV1",
+ "CONV2",
+ "CONV3",
+ "CONV4",
+ "CONV5",
+};
+
+/**
+ * struct miic - MII converter structure
+ * @base: base address of the MII converter
+ * @dev: Device associated with the MII converter
+ * @clks: Clocks used for this device
+ * @nclk: Number of clocks
+ * @lock: Lock used for read-modify-write access
+ */
+struct miic {
+ void __iomem *base;
+ struct device *dev;
+ struct clk_bulk_data *clks;
+ int nclk;
+ spinlock_t lock;
+};
+
+/**
+ * struct miic_port - Per port MII converter struct
+ * @miic: back pointer to the MII converter structure
+ * @pcs: PCS structure associated with the port
+ * @port: port number
+ * @interface: interface mode of the port
+ */
+struct miic_port {
+ struct miic *miic;
+ struct phylink_pcs pcs;
+ int port;
+ phy_interface_t interface;
+};
+
+static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct miic_port, pcs);
+}
+
+static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+{
+ writel(value, miic->base + offset);
+}
+
+static u32 miic_reg_readl(struct miic *miic, int offset)
+{
+ return readl(miic->base + offset);
+}
+
+static void miic_reg_rmw(struct miic *miic, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&miic->lock);
+
+ reg = miic_reg_readl(miic, offset);
+ reg &= ~mask;
+ reg |= val;
+ miic_reg_writel(miic, offset, reg);
+
+ spin_unlock(&miic->lock);
+}
+
+static void miic_converter_enable(struct miic *miic, int port, int enable)
+{
+ u32 val = 0;
+
+ if (enable)
+ val = MIIC_CONVRST_PHYIF_RST(port);
+
+ miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
+}
+
+static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising, bool permit)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 speed, conv_mode, val, mask;
+ int port = miic_port->port;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ conv_mode = CONV_MODE_RMII;
+ speed = CONV_MODE_100MBPS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ conv_mode = CONV_MODE_RGMII;
+ speed = CONV_MODE_1000MBPS;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ conv_mode = CONV_MODE_MII;
+ /* When in MII mode, speed should be set to 0 (which is actually
+ * CONV_MODE_10MBPS)
+ */
+ speed = CONV_MODE_10MBPS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
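+	/* e.g. RGMII: FIELD_PREP(GENMASK(3, 2), CONV_MODE_RGMII) = 2 << 2 = 0x8 */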
+ val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, conv_mode);
+ mask = MIIC_CONVCTRL_CONV_MODE;
+
+	/* Update the speed only when the interface itself changes: the link
+	 * might already be up, and changing the speed underneath it would
+	 * break the link.
+	 */
+ if (interface != miic_port->interface) {
+ val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, speed);
+ mask |= MIIC_CONVCTRL_CONV_SPEED;
+ miic_port->interface = interface;
+ }
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val);
+ miic_converter_enable(miic_port->miic, miic_port->port, 1);
+
+ return 0;
+}
+
+static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 conv_speed = 0, val = 0;
+ int port = miic_port->port;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MIIC_CONVCTRL_FULLD;
+
+ /* No speed in MII through-mode */
+ if (interface != PHY_INTERFACE_MODE_MII) {
+ switch (speed) {
+ case SPEED_1000:
+ conv_speed = CONV_MODE_1000MBPS;
+ break;
+ case SPEED_100:
+ conv_speed = CONV_MODE_100MBPS;
+ break;
+ case SPEED_10:
+ conv_speed = CONV_MODE_10MBPS;
+ break;
+ default:
+ return;
+ }
+ }
+
+ val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, conv_speed);
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(port),
+ (MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
+}
+
+static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ if (phy_interface_mode_is_rgmii(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_RMII ||
+ state->interface == PHY_INTERFACE_MODE_MII)
+ return 1;
+
+ return -EINVAL;
+}
+
+static const struct phylink_pcs_ops miic_phylink_ops = {
+ .pcs_validate = miic_validate,
+ .pcs_config = miic_config,
+ .pcs_link_up = miic_link_up,
+};
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct miic_port *miic_port;
+ struct device_node *pcs_np;
+ struct miic *miic;
+ u32 port;
+
+ if (!of_device_is_available(np))
+ return ERR_PTR(-ENODEV);
+
+ if (of_property_read_u32(np, "reg", &port))
+ return ERR_PTR(-EINVAL);
+
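+	/* converters are numbered 1..MIIC_MAX_NR_PORTS in DT (CONV1..CONV5) */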
+ if (port > MIIC_MAX_NR_PORTS || port < 1)
+ return ERR_PTR(-EINVAL);
+
+ /* The PCS pdev is attached to the parent node */
+ pcs_np = of_get_parent(np);
+ if (!pcs_np)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_device_is_available(pcs_np)) {
+ of_node_put(pcs_np);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(pcs_np);
+ of_node_put(pcs_np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
+ if (!miic_port)
+ return ERR_PTR(-ENOMEM);
+
+ miic = platform_get_drvdata(pdev);
+ device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ miic_port->miic = miic;
+ miic_port->port = port - 1;
+ miic_port->pcs.ops = &miic_phylink_ops;
+
+ return &miic_port->pcs;
+}
+EXPORT_SYMBOL(miic_create);
+
+void miic_destroy(struct phylink_pcs *pcs)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+
+ miic_converter_enable(miic_port->miic, miic_port->port, 0);
+ kfree(miic_port);
+}
+EXPORT_SYMBOL(miic_destroy);
+
+static int miic_init_hw(struct miic *miic, u32 cfg_mode)
+{
+ int port;
+
+	/* Unlock write access to the accessory registers (cf. datasheet). If
+	 * this driver is ever used in conjunction with the Cortex-M3, this
+	 * sequence will have to be moved into the register write path.
+	 */
+ miic_reg_writel(miic, MIIC_PRCMD, 0x00A5);
+ miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+ miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE);
+ miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+
+ miic_reg_writel(miic, MIIC_MODCTRL,
+ FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode));
+
+ for (port = 0; port < MIIC_MAX_NR_PORTS; port++) {
+ miic_converter_enable(miic, port, 0);
+ /* Disable speed/duplex control from these registers, datasheet
+ * says switch registers should be used to setup switch port
+ * speed and duplex.
+ */
+ miic_reg_writel(miic, MIIC_SWCTRL, 0x0);
+ miic_reg_writel(miic, MIIC_SWDUPC, 0x0);
+ }
+
+ return 0;
+}
+
+static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM])
+{
+ int i;
+
+ for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ if (dt_val[i] == MIIC_MODCTRL_CONF_NONE)
+ continue;
+
+ if (dt_val[i] != table_val[i])
+ return false;
+ }
+
+ return true;
+}
+
+static void miic_dump_conf(struct device *dev,
+ s8 conf[MIIC_MODCTRL_CONF_CONV_NUM])
+{
+ const char *conf_name;
+ int i;
+
+ for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ if (conf[i] != MIIC_MODCTRL_CONF_NONE)
+ conf_name = conf_to_string[conf[i]];
+ else
+ conf_name = "NONE";
+
+ dev_err(dev, "%s: %s\n", index_to_string[i], conf_name);
+ }
+}
+
+static int miic_match_dt_conf(struct device *dev,
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM],
+ u32 *mode_cfg)
+{
+ struct modctrl_match *table_entry;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) {
+ table_entry = &modctrl_match_table[i];
+
+ if (miic_modctrl_match(table_entry->conv, dt_val)) {
+ *mode_cfg = table_entry->mode_cfg;
+ return 0;
+ }
+ }
+
+ dev_err(dev, "Failed to apply requested configuration\n");
+ miic_dump_conf(dev, dt_val);
+
+ return -EINVAL;
+}
+
+static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
+{
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM];
+ struct device_node *np = dev->of_node;
+ struct device_node *conv;
+ u32 conf;
+ int port;
+
+ memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val));
+
+ if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
+ dt_val[0] = conf;
+
+ for_each_child_of_node(np, conv) {
+ if (of_property_read_u32(conv, "reg", &port))
+ continue;
+
+ if (!of_device_is_available(conv))
+ continue;
+
+ if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
+ dt_val[port] = conf;
+ }
+
+ return miic_match_dt_conf(dev, dt_val, mode_cfg);
+}
+
+static int miic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct miic *miic;
+ u32 mode_cfg;
+ int ret;
+
+ ret = miic_parse_dt(dev, &mode_cfg);
+ if (ret < 0)
+ return ret;
+
+ miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
+ if (!miic)
+ return -ENOMEM;
+
+ spin_lock_init(&miic->lock);
+ miic->dev = dev;
+ miic->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(miic->base))
+ return PTR_ERR(miic->base);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = miic_init_hw(miic, mode_cfg);
+ if (ret)
+ goto disable_runtime_pm;
+
+	/* miic_create() relies on the fact that driver data is attached to
+	 * the platform device to determine whether the driver has finished
+	 * probing, so this must be the last step, once everything else is
+	 * initialized properly.
+	 */
+ platform_set_drvdata(pdev, miic);
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int miic_remove(struct platform_device *pdev)
+{
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id miic_of_mtable[] = {
+ { .compatible = "renesas,rzn1-miic" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, miic_of_mtable);
+
+static struct platform_driver miic_driver = {
+ .driver = {
+ .name = "rzn1_miic",
+ .suppress_bind_attrs = true,
+ .of_match_table = miic_of_mtable,
+ },
+ .probe = miic_probe,
+ .remove = miic_remove,
+};
+module_platform_driver(miic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas MII converter PCS driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9fee639ee5c8..c57a0262fb64 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -104,6 +104,8 @@ config AX88796B_PHY
config BROADCOM_PHY
tristate "Broadcom 54XX PHYs"
select BCM_NET_PHYLIB
+ select BCM_NET_PHYPTP if NETWORK_PHY_TIMESTAMPING
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
@@ -160,6 +162,9 @@ config BCM_CYGNUS_PHY
config BCM_NET_PHYLIB
tristate
+config BCM_NET_PHYPTP
+ tristate
+
config CICADA_PHY
tristate "Cicada PHYs"
help
@@ -216,6 +221,8 @@ config MARVELL_88X2222_PHY
config MAXLINEAR_GPHY
tristate "Maxlinear Ethernet PHYs"
+ select POLYNOMIAL if HWMON
+ depends on HWMON || HWMON=n
help
Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
GPY241, GPY245 PHYs.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b12b1d86fc99..f7138d3c896b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
+obj-$(CONFIG_BCM_NET_PHYPTP) += bcm-phy-ptp.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index a8db1a19011b..8b7a46db30e0 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -22,6 +22,7 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR113C 0x31c31c12
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
@@ -34,6 +35,8 @@
#define MDIO_AN_VEND_PROV 0xc400
#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
+#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
@@ -231,9 +234,20 @@ static int aqr_config_aneg(struct phy_device *phydev)
phydev->advertising))
reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+	/* Advertise the 2.5G and 5G speeds when requested; the modify below
+	 * clears these bits when the speeds are not advertised.
+	 */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
MDIO_AN_VEND_PROV_1000BASET_HALF |
- MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
+ MDIO_AN_VEND_PROV_1000BASET_FULL |
+ MDIO_AN_VEND_PROV_2500BASET_FULL |
+ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
if (ret < 0)
return ret;
if (ret > 0)
@@ -684,6 +698,24 @@ static struct phy_driver aqr_driver[] = {
.handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
+ .name = "Aquantia AQR113C",
+ .probe = aqr107_probe,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
};
module_phy_driver(aqr_driver);
@@ -696,6 +728,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ }
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 6a467e7817a6..59fe356942b5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -2072,6 +2072,8 @@ static struct phy_driver at803x_driver[] = {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
.name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.suspend = at803x_suspend,
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
@@ -2087,6 +2089,8 @@ static struct phy_driver at803x_driver[] = {
/* Qualcomm Atheros QCA9561 */
PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
.name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.suspend = at803x_suspend,
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
@@ -2151,6 +2155,8 @@ static struct phy_driver at803x_driver[] = {
PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
.name = "Qualcomm QCA8081",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 457896337505..0f1e617a26c9 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -88,8 +88,10 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
/* Reset PHY, otherwise MII_LPA will provide outdated information.
* This issue is reproducible only with some link partner PHYs
*/
- if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset)
- phydev->drv->soft_reset(phydev);
+ if (phydev->state == PHY_NOLINK) {
+ phy_init_hw(phydev);
+ phy_start_aneg(phydev);
+ }
}
static struct phy_driver asix_driver[] = {
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index c3842f87c33b..9902fb182099 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -87,4 +87,23 @@ int bcm_phy_cable_test_start_rdb(struct phy_device *phydev);
int bcm_phy_cable_test_start(struct phy_device *phydev);
int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished);
+#if IS_ENABLED(CONFIG_BCM_NET_PHYPTP)
+struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev);
+void bcm_ptp_config_init(struct phy_device *phydev);
+void bcm_ptp_stop(struct bcm_ptp_private *priv);
+#else
+static inline struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
+{
+ return NULL;
+}
+
+static inline void bcm_ptp_config_init(struct phy_device *phydev)
+{
+}
+
+static inline void bcm_ptp_stop(struct bcm_ptp_private *priv)
+{
+}
+#endif
+
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
new file mode 100644
index 000000000000..ef00d6163061
--- /dev/null
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Meta Platforms Inc.
+ * Copyright (C) 2022 Jonathan Lemon <jonathan.lemon@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+
+#include "bcm-phy-lib.h"
+
+/* IEEE 1588 Expansion registers */
+#define SLICE_CTRL 0x0810
+#define SLICE_TX_EN BIT(0)
+#define SLICE_RX_EN BIT(8)
+#define TX_EVENT_MODE 0x0811
+#define MODE_TX_UPDATE_CF BIT(0)
+#define MODE_TX_REPLACE_TS_CF BIT(1)
+#define MODE_TX_REPLACE_TS GENMASK(1, 0)
+#define RX_EVENT_MODE 0x0819
+#define MODE_RX_UPDATE_CF BIT(0)
+#define MODE_RX_INSERT_TS_48 BIT(1)
+#define MODE_RX_INSERT_TS_64 GENMASK(1, 0)
+
+#define MODE_EVT_SHIFT_SYNC 0
+#define MODE_EVT_SHIFT_DELAY_REQ 2
+#define MODE_EVT_SHIFT_PDELAY_REQ 4
+#define MODE_EVT_SHIFT_PDELAY_RESP 6
+
+#define MODE_SEL_SHIFT_PORT 0
+#define MODE_SEL_SHIFT_CPU 8
+
+#define RX_MODE_SEL(sel, evt, act) \
+ (((MODE_RX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
+
+#define TX_MODE_SEL(sel, evt, act) \
+ (((MODE_TX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
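+
+/* e.g. TX_MODE_SEL(PORT, SYNC, REPLACE_TS) expands to
+ * (MODE_TX_REPLACE_TS << MODE_EVT_SHIFT_SYNC) << MODE_SEL_SHIFT_PORT,
+ * selecting the "replace timestamp" action for SYNC events on the
+ * port (rather than CPU) side of the register.
+ */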
+
+/* needs global TS capture first */
+#define TX_TS_CAPTURE 0x0821
+#define TX_TS_CAP_EN BIT(0)
+#define RX_TS_CAPTURE 0x0822
+#define RX_TS_CAP_EN BIT(0)
+
+#define TIME_CODE_0 0x0854
+#define TIME_CODE_1 0x0855
+#define TIME_CODE_2 0x0856
+#define TIME_CODE_3 0x0857
+#define TIME_CODE_4 0x0858
+
+#define DPLL_SELECT 0x085b
+#define DPLL_HB_MODE2 BIT(6)
+
+#define SHADOW_CTRL 0x085c
+#define SHADOW_LOAD 0x085d
+#define TIME_CODE_LOAD BIT(10)
+#define SYNC_OUT_LOAD BIT(9)
+#define NCO_TIME_LOAD BIT(7)
+#define FREQ_LOAD BIT(6)
+#define INTR_MASK 0x085e
+#define INTR_STATUS 0x085f
+#define INTC_FSYNC BIT(0)
+#define INTC_SOP BIT(1)
+
+#define NCO_FREQ_LSB 0x0873
+#define NCO_FREQ_MSB 0x0874
+
+#define NCO_TIME_0 0x0875
+#define NCO_TIME_1 0x0876
+#define NCO_TIME_2_CTRL 0x0877
+#define FREQ_MDIO_SEL BIT(14)
+
+#define SYNC_OUT_0 0x0878
+#define SYNC_OUT_1 0x0879
+#define SYNC_OUT_2 0x087a
+
+#define SYNC_IN_DIVIDER 0x087b
+
+#define SYNOUT_TS_0 0x087c
+#define SYNOUT_TS_1 0x087d
+#define SYNOUT_TS_2 0x087e
+
+#define NSE_CTRL 0x087f
+#define NSE_GMODE_EN GENMASK(15, 14)
+#define NSE_CAPTURE_EN BIT(13)
+#define NSE_INIT BIT(12)
+#define NSE_CPU_FRAMESYNC BIT(5)
+#define NSE_SYNC1_FRAMESYNC BIT(3)
+#define NSE_FRAMESYNC_MASK GENMASK(5, 2)
+#define NSE_PEROUT_EN BIT(1)
+#define NSE_ONESHOT_EN BIT(0)
+#define NSE_SYNC_OUT_MASK GENMASK(1, 0)
+
+#define TS_READ_CTRL 0x0885
+#define TS_READ_START BIT(0)
+#define TS_READ_END BIT(1)
+
+#define HB_REG_0 0x0886
+#define HB_REG_1 0x0887
+#define HB_REG_2 0x0888
+#define HB_REG_3 0x08ec
+#define HB_REG_4 0x08ed
+#define HB_STAT_CTRL 0x088e
+#define HB_READ_START BIT(10)
+#define HB_READ_END BIT(11)
+#define HB_READ_MASK GENMASK(11, 10)
+
+#define TS_REG_0 0x0889
+#define TS_REG_1 0x088a
+#define TS_REG_2 0x088b
+#define TS_REG_3 0x08c4
+
+#define TS_INFO_0 0x088c
+#define TS_INFO_1 0x088d
+
+#define TIMECODE_CTRL 0x08c3
+#define TX_TIMECODE_SEL GENMASK(7, 0)
+#define RX_TIMECODE_SEL GENMASK(15, 8)
+
+#define TIME_SYNC 0x0ff5
+#define TIME_SYNC_EN BIT(0)
+
+struct bcm_ptp_private {
+ struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct ptp_pin_desc pin;
+ struct mutex mutex;
+ struct sk_buff_head tx_queue;
+ int tx_type;
+ bool hwts_rx;
+ u16 nse_ctrl;
+ bool pin_active;
+ struct delayed_work pin_work;
+};
+
+struct bcm_ptp_skb_cb {
+ unsigned long timeout;
+ u16 seq_id;
+ u8 msgtype;
+ bool discard;
+};
+
+struct bcm_ptp_capture {
+ ktime_t hwtstamp;
+ u16 seq_id;
+ u8 msgtype;
+ bool tx_dir;
+};
+
+#define BCM_SKB_CB(skb) ((struct bcm_ptp_skb_cb *)(skb)->cb)
+#define SKB_TS_TIMEOUT 10 /* jiffies */
+
+#define BCM_MAX_PULSE_8NS ((1U << 9) - 1)
+#define BCM_MAX_PERIOD_8NS ((1U << 30) - 1)
+
+#define BRCM_PHY_MODEL(phydev) \
+ ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
+
+static struct bcm_ptp_private *mii2priv(struct mii_timestamper *mii_ts)
+{
+ return container_of(mii_ts, struct bcm_ptp_private, mii_ts);
+}
+
+static struct bcm_ptp_private *ptp2priv(struct ptp_clock_info *info)
+{
+ return container_of(info, struct bcm_ptp_private, ptp_info);
+}
+
+static void bcm_ptp_get_framesync_ts(struct phy_device *phydev,
+ struct timespec64 *ts)
+{
+ u16 hb[4];
+
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_START);
+
+ hb[0] = bcm_phy_read_exp(phydev, HB_REG_0);
+ hb[1] = bcm_phy_read_exp(phydev, HB_REG_1);
+ hb[2] = bcm_phy_read_exp(phydev, HB_REG_2);
+ hb[3] = bcm_phy_read_exp(phydev, HB_REG_3);
+
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_END);
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, 0);
+
+ ts->tv_sec = (hb[3] << 16) | hb[2];
+ ts->tv_nsec = (hb[1] << 16) | hb[0];
+}
+
+static u16 bcm_ptp_framesync_disable(struct phy_device *phydev, u16 orig_ctrl)
+{
+ u16 ctrl = orig_ctrl & ~(NSE_FRAMESYNC_MASK | NSE_CAPTURE_EN);
+
+ bcm_phy_write_exp(phydev, NSE_CTRL, ctrl);
+
+ return ctrl;
+}
+
+static void bcm_ptp_framesync_restore(struct phy_device *phydev, u16 orig_ctrl)
+{
+ if (orig_ctrl & NSE_FRAMESYNC_MASK)
+ bcm_phy_write_exp(phydev, NSE_CTRL, orig_ctrl);
+}
+
+static void bcm_ptp_framesync(struct phy_device *phydev, u16 ctrl)
+{
+ /* trigger framesync - must have 0->1 transition. */
+ bcm_phy_write_exp(phydev, NSE_CTRL, ctrl | NSE_CPU_FRAMESYNC);
+}
+
+static int bcm_ptp_framesync_ts(struct phy_device *phydev,
+ struct ptp_system_timestamp *sts,
+ struct timespec64 *ts,
+ u16 orig_ctrl)
+{
+ u16 ctrl, reg;
+ int i;
+
+ ctrl = bcm_ptp_framesync_disable(phydev, orig_ctrl);
+
+ ptp_read_system_prets(sts);
+
+ /* trigger framesync + capture */
+ bcm_ptp_framesync(phydev, ctrl | NSE_CAPTURE_EN);
+
+ ptp_read_system_postts(sts);
+
+ /* poll for FSYNC interrupt from TS capture */
+ for (i = 0; i < 10; i++) {
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if (reg & INTC_FSYNC) {
+ bcm_ptp_get_framesync_ts(phydev, ts);
+ break;
+ }
+ }
+
+ bcm_ptp_framesync_restore(phydev, orig_ctrl);
+
+ return reg & INTC_FSYNC ? 0 : -ETIMEDOUT;
+}
+
+static int bcm_ptp_gettimex(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_framesync_ts(priv->phydev, sts, ts, priv->nse_ctrl);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_settime_locked(struct bcm_ptp_private *priv,
+ const struct timespec64 *ts)
+{
+ struct phy_device *phydev = priv->phydev;
+ u16 ctrl;
+ u64 ns;
+
+ ctrl = bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
+
+ /* set up time code */
+ bcm_phy_write_exp(phydev, TIME_CODE_0, ts->tv_nsec);
+ bcm_phy_write_exp(phydev, TIME_CODE_1, ts->tv_nsec >> 16);
+ bcm_phy_write_exp(phydev, TIME_CODE_2, ts->tv_sec);
+ bcm_phy_write_exp(phydev, TIME_CODE_3, ts->tv_sec >> 16);
+ bcm_phy_write_exp(phydev, TIME_CODE_4, ts->tv_sec >> 32);
+
+ /* set NCO counter to match */
+ ns = timespec64_to_ns(ts);
+ bcm_phy_write_exp(phydev, NCO_TIME_0, ns >> 4);
+ bcm_phy_write_exp(phydev, NCO_TIME_1, ns >> 20);
+ bcm_phy_write_exp(phydev, NCO_TIME_2_CTRL, (ns >> 36) & 0xfff);
+
+ /* set up load on next frame sync (auto-clears due to NSE_INIT) */
+ bcm_phy_write_exp(phydev, SHADOW_LOAD, TIME_CODE_LOAD | NCO_TIME_LOAD);
+
+ /* must have NSE_INIT in order to write time code */
+ bcm_ptp_framesync(phydev, ctrl | NSE_INIT);
+
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ return 0;
+}
+
+static int bcm_ptp_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_settime_locked(priv, ts);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_adjtime_locked(struct bcm_ptp_private *priv,
+ s64 delta_ns)
+{
+ struct timespec64 ts;
+ int err;
+ s64 ns;
+
+ err = bcm_ptp_framesync_ts(priv->phydev, NULL, &ts, priv->nse_ctrl);
+ if (!err) {
+ ns = timespec64_to_ns(&ts) + delta_ns;
+ ts = ns_to_timespec64(ns);
+ err = bcm_ptp_settime_locked(priv, &ts);
+ }
+ return err;
+}
+
+static int bcm_ptp_adjtime(struct ptp_clock_info *info, s64 delta_ns)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_adjtime_locked(priv, delta_ns);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+/* A 125 MHz clock should adjust 8 ns per pulse.
+ * The frequency adjustment base is 0x8000 0000, or 8*2^28.
+ *
+ * Frequency adjustment is
+ * adj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
+ * which simplifies to:
+ * adj = scaled_ppm * 2^9 / 5^6
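+ *
+ * For instance (illustrative numbers, not from the datasheet):
+ * scaled_ppm = 65536 (+1 ppm) gives adj = 65536 * 512 = 33554432 and
+ * diff = 33554432 / 15625 = 2147, i.e. ~1 ppm of the 2^31 base.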
+ */
+static int bcm_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int neg_adj = 0;
+ u32 diff, freq;
+ u16 ctrl;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = scaled_ppm << 9;
+ diff = div_u64(adj, 15625);
+ freq = (8 << 28) + (neg_adj ? -diff : diff);
+
+ mutex_lock(&priv->mutex);
+
+ ctrl = bcm_ptp_framesync_disable(priv->phydev, priv->nse_ctrl);
+
+ bcm_phy_write_exp(priv->phydev, NCO_FREQ_LSB, freq);
+ bcm_phy_write_exp(priv->phydev, NCO_FREQ_MSB, freq >> 16);
+
+ bcm_phy_write_exp(priv->phydev, NCO_TIME_2_CTRL, FREQ_MDIO_SEL);
+
+ /* load on next framesync */
+ bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, FREQ_LOAD);
+
+ bcm_ptp_framesync(priv->phydev, ctrl);
+
+ /* clear load */
+ bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, 0);
+
+ bcm_ptp_framesync_restore(priv->phydev, priv->nse_ctrl);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+static bool bcm_ptp_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct skb_shared_hwtstamps *hwts;
+ struct ptp_header *header;
+ u32 sec, nsec;
+ u8 *data;
+ int off;
+
+ if (!priv->hwts_rx)
+ return false;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return false;
+
+ data = (u8 *)(header + 1);
+ sec = get_unaligned_be32(data);
+ nsec = get_unaligned_be32(data + 4);
+
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = ktime_set(sec, nsec);
+
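+	/* strip the 8-byte timestamp the PHY inserted after the PTP header */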
+ off = data - skb->data + 8;
+ if (off < skb->len) {
+ memmove(data, data + 8, skb->len - off);
+ __pskb_trim(skb, skb->len - 8);
+ }
+
+ return false;
+}
+
+static bool bcm_ptp_get_tstamp(struct bcm_ptp_private *priv,
+ struct bcm_ptp_capture *capts)
+{
+ struct phy_device *phydev = priv->phydev;
+ u16 ts[4], reg;
+ u32 sec, nsec;
+
+ mutex_lock(&priv->mutex);
+
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if ((reg & INTC_SOP) == 0) {
+ mutex_unlock(&priv->mutex);
+ return false;
+ }
+
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_START);
+
+ ts[0] = bcm_phy_read_exp(phydev, TS_REG_0);
+ ts[1] = bcm_phy_read_exp(phydev, TS_REG_1);
+ ts[2] = bcm_phy_read_exp(phydev, TS_REG_2);
+ ts[3] = bcm_phy_read_exp(phydev, TS_REG_3);
+
+ /* not in be32 format for some reason */
+ capts->seq_id = bcm_phy_read_exp(priv->phydev, TS_INFO_0);
+
+ reg = bcm_phy_read_exp(phydev, TS_INFO_1);
+ capts->msgtype = reg >> 12;
+ capts->tx_dir = !!(reg & BIT(11));
+
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_END);
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, 0);
+
+ mutex_unlock(&priv->mutex);
+
+ sec = (ts[3] << 16) | ts[2];
+ nsec = (ts[1] << 16) | ts[0];
+ capts->hwtstamp = ktime_set(sec, nsec);
+
+ return true;
+}
+
+static void bcm_ptp_match_tstamp(struct bcm_ptp_private *priv,
+ struct bcm_ptp_capture *capts)
+{
+ struct skb_shared_hwtstamps hwts;
+ struct sk_buff *skb, *ts_skb;
+ unsigned long flags;
+ bool first = false;
+
+ ts_skb = NULL;
+ spin_lock_irqsave(&priv->tx_queue.lock, flags);
+ skb_queue_walk(&priv->tx_queue, skb) {
+ if (BCM_SKB_CB(skb)->seq_id == capts->seq_id &&
+ BCM_SKB_CB(skb)->msgtype == capts->msgtype) {
+ first = skb_queue_is_first(&priv->tx_queue, skb);
+ __skb_unlink(skb, &priv->tx_queue);
+ ts_skb = skb;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
+
+ /* TX captures one-step packets, discard them if needed. */
+ if (ts_skb) {
+ if (BCM_SKB_CB(ts_skb)->discard) {
+ kfree_skb(ts_skb);
+ } else {
+ memset(&hwts, 0, sizeof(hwts));
+ hwts.hwtstamp = capts->hwtstamp;
+ skb_complete_tx_timestamp(ts_skb, &hwts);
+ }
+ }
+
+	/* the matched skb was not at the head of the queue (or there was no
+	 * match): expire stale entries.
+	 */
+ if (!first) {
+ while ((skb = skb_dequeue(&priv->tx_queue))) {
+ if (!time_after(jiffies, BCM_SKB_CB(skb)->timeout)) {
+ skb_queue_head(&priv->tx_queue, skb);
+ break;
+ }
+ kfree_skb(skb);
+ }
+ }
+}
+
+static long bcm_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ struct bcm_ptp_capture capts;
+ bool reschedule = false;
+
+ while (!skb_queue_empty_lockless(&priv->tx_queue)) {
+ if (!bcm_ptp_get_tstamp(priv, &capts)) {
+ reschedule = true;
+ break;
+ }
+ bcm_ptp_match_tstamp(priv, &capts);
+ }
+
+ return reschedule ? 1 : -1;
+}
+
+static int bcm_ptp_cancel_func(struct bcm_ptp_private *priv)
+{
+ if (!priv->pin_active)
+ return 0;
+
+ priv->pin_active = false;
+
+ priv->nse_ctrl &= ~(NSE_SYNC_OUT_MASK | NSE_SYNC1_FRAMESYNC |
+ NSE_CAPTURE_EN);
+ bcm_phy_write_exp(priv->phydev, NSE_CTRL, priv->nse_ctrl);
+
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ return 0;
+}
+
+static void bcm_ptp_perout_work(struct work_struct *pin_work)
+{
+ struct bcm_ptp_private *priv =
+ container_of(pin_work, struct bcm_ptp_private, pin_work.work);
+ struct phy_device *phydev = priv->phydev;
+ struct timespec64 ts;
+ u64 ns, next;
+ u16 ctrl;
+
+ mutex_lock(&priv->mutex);
+
+ /* no longer running */
+ if (!priv->pin_active) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ bcm_ptp_framesync_ts(phydev, NULL, &ts, priv->nse_ctrl);
+
+ /* this is 1PPS only */
+ next = NSEC_PER_SEC - ts.tv_nsec;
+ ts.tv_sec += next < NSEC_PER_MSEC ? 2 : 1;
+ ts.tv_nsec = 0;
+
+ ns = timespec64_to_ns(&ts);
+
+ /* force 0->1 transition for ONESHOT */
+ ctrl = bcm_ptp_framesync_disable(phydev,
+ priv->nse_ctrl & ~NSE_ONESHOT_EN);
+
+ bcm_phy_write_exp(phydev, SYNOUT_TS_0, ns & 0xfff0);
+ bcm_phy_write_exp(phydev, SYNOUT_TS_1, ns >> 16);
+ bcm_phy_write_exp(phydev, SYNOUT_TS_2, ns >> 32);
+
+ /* load values on next framesync */
+ bcm_phy_write_exp(phydev, SHADOW_LOAD, SYNC_OUT_LOAD);
+
+ bcm_ptp_framesync(phydev, ctrl | NSE_ONESHOT_EN | NSE_INIT);
+
+ priv->nse_ctrl |= NSE_ONESHOT_EN;
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ mutex_unlock(&priv->mutex);
+
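+	/* re-arm roughly 1ms after the programmed pulse edge (1PPS cadence) */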
+ next = next + NSEC_PER_MSEC;
+ schedule_delayed_work(&priv->pin_work, nsecs_to_jiffies(next));
+}
+
+static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
+ struct ptp_perout_request *req, int on)
+{
+ struct phy_device *phydev = priv->phydev;
+ u64 period, pulse;
+ u16 val;
+
+ if (!on)
+ return bcm_ptp_cancel_func(priv);
+
+ /* 1PPS */
+ if (req->period.sec != 1 || req->period.nsec != 0)
+ return -EINVAL;
+
+ period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
+
+ if (req->flags & PTP_PEROUT_PHASE)
+ return -EOPNOTSUPP;
+
+ if (req->flags & PTP_PEROUT_DUTY_CYCLE)
+ pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec));
+ else
+ pulse = (u64)BCM_MAX_PULSE_8NS << 3;
+
+ /* convert to 8ns units */
+ pulse >>= 3;
+
+ if (!pulse || pulse > period || pulse > BCM_MAX_PULSE_8NS)
+ return -EINVAL;
+
+ bcm_phy_write_exp(phydev, SYNC_OUT_0, period);
+
+ val = ((pulse & 0x3) << 14) | ((period >> 16) & 0x3fff);
+ bcm_phy_write_exp(phydev, SYNC_OUT_1, val);
+
+ val = ((pulse >> 2) & 0x7f) | (pulse << 7);
+ bcm_phy_write_exp(phydev, SYNC_OUT_2, val);
+
+ if (priv->pin_active)
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ priv->pin_active = true;
+ INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_perout_work);
+ schedule_delayed_work(&priv->pin_work, 0);
+
+ return 0;
+}
+
+static void bcm_ptp_extts_work(struct work_struct *pin_work)
+{
+ struct bcm_ptp_private *priv =
+ container_of(pin_work, struct bcm_ptp_private, pin_work.work);
+ struct phy_device *phydev = priv->phydev;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ u16 reg;
+
+ mutex_lock(&priv->mutex);
+
+ /* no longer running */
+ if (!priv->pin_active) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if ((reg & INTC_FSYNC) == 0)
+ goto out;
+
+ bcm_ptp_get_framesync_ts(phydev, &ts);
+
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = timespec64_to_ns(&ts);
+ ptp_clock_event(priv->ptp_clock, &event);
+
+out:
+ mutex_unlock(&priv->mutex);
+ schedule_delayed_work(&priv->pin_work, HZ / 4);
+}
+
+static int bcm_ptp_extts_locked(struct bcm_ptp_private *priv, int on)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!on)
+ return bcm_ptp_cancel_func(priv);
+
+ if (priv->pin_active)
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
+
+ priv->nse_ctrl |= NSE_SYNC1_FRAMESYNC | NSE_CAPTURE_EN;
+
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ priv->pin_active = true;
+ INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_extts_work);
+ schedule_delayed_work(&priv->pin_work, 0);
+
+ return 0;
+}
+
+static int bcm_ptp_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err = -EBUSY;
+
+ mutex_lock(&priv->mutex);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (priv->pin.func == PTP_PF_PEROUT)
+ err = bcm_ptp_perout_locked(priv, &rq->perout, on);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ if (priv->pin.func == PTP_PF_EXTTS)
+ err = bcm_ptp_extts_locked(priv, on);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_verify(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static const struct ptp_clock_info bcm_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .max_adj = 100000000,
+ .gettimex64 = bcm_ptp_gettimex,
+ .settime64 = bcm_ptp_settime,
+ .adjtime = bcm_ptp_adjtime,
+ .adjfine = bcm_ptp_adjfine,
+ .enable = bcm_ptp_enable,
+ .verify = bcm_ptp_verify,
+ .do_aux_work = bcm_ptp_do_aux_work,
+ .n_pins = 1,
+ .n_per_out = 1,
+ .n_ext_ts = 1,
+};
+
+static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct ptp_header *hdr;
+ bool discard = false;
+ int msgtype;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ goto out;
+ msgtype = ptp_get_msgtype(hdr, type);
+
+ switch (priv->tx_type) {
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ if (msgtype == PTP_MSGTYPE_PDELAY_RESP)
+ discard = true;
+ fallthrough;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ discard = true;
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ BCM_SKB_CB(skb)->timeout = jiffies + SKB_TS_TIMEOUT;
+ BCM_SKB_CB(skb)->seq_id = be16_to_cpu(hdr->sequence_id);
+ BCM_SKB_CB(skb)->msgtype = msgtype;
+ BCM_SKB_CB(skb)->discard = discard;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&priv->tx_queue, skb);
+ ptp_schedule_worker(priv->ptp_clock, 0);
+ return;
+ default:
+ break;
+ }
+
+out:
+ kfree_skb(skb);
+}
+
+static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
+ struct ifreq *ifr)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct hwtstamp_config cfg;
+ u16 mode, ctrl;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ priv->hwts_rx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->tx_type = cfg.tx_type;
+
+ ctrl = priv->hwts_rx ? SLICE_RX_EN : 0;
+ ctrl |= priv->tx_type != HWTSTAMP_TX_OFF ? SLICE_TX_EN : 0;
+
+ mode = TX_MODE_SEL(PORT, SYNC, REPLACE_TS) |
+ TX_MODE_SEL(PORT, DELAY_REQ, REPLACE_TS) |
+ TX_MODE_SEL(PORT, PDELAY_REQ, REPLACE_TS) |
+ TX_MODE_SEL(PORT, PDELAY_RESP, REPLACE_TS);
+
+ bcm_phy_write_exp(priv->phydev, TX_EVENT_MODE, mode);
+
+ mode = RX_MODE_SEL(PORT, SYNC, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, DELAY_REQ, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, PDELAY_REQ, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, PDELAY_RESP, INSERT_TS_64);
+
+ bcm_phy_write_exp(priv->phydev, RX_EVENT_MODE, mode);
+
+ bcm_phy_write_exp(priv->phydev, SLICE_CTRL, ctrl);
+
+ if (ctrl & SLICE_TX_EN)
+ bcm_phy_write_exp(priv->phydev, TX_TS_CAPTURE, TX_TS_CAP_EN);
+ else
+ ptp_cancel_worker_sync(priv->ptp_clock);
+
+ /* purge existing data */
+ skb_queue_purge(&priv->tx_queue);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *ts_info)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+
+ ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
+ ts_info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ ts_info->tx_types =
+ BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC) |
+ BIT(HWTSTAMP_TX_ONESTEP_P2P);
+ ts_info->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+void bcm_ptp_stop(struct bcm_ptp_private *priv)
+{
+ ptp_cancel_worker_sync(priv->ptp_clock);
+ bcm_ptp_cancel_func(priv);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_stop);
+
+void bcm_ptp_config_init(struct phy_device *phydev)
+{
+ /* init network sync engine */
+ bcm_phy_write_exp(phydev, NSE_CTRL, NSE_GMODE_EN | NSE_INIT);
+
+ /* enable time sync (TX/RX SOP capture) */
+ bcm_phy_write_exp(phydev, TIME_SYNC, TIME_SYNC_EN);
+
+ /* use sec.nsec heartbeat capture */
+ bcm_phy_write_exp(phydev, DPLL_SELECT, DPLL_HB_MODE2);
+
+ /* use 64 bit timecode for TX */
+ bcm_phy_write_exp(phydev, TIMECODE_CTRL, TX_TIMECODE_SEL);
+
+ /* always allow FREQ_LOAD on framesync */
+ bcm_phy_write_exp(phydev, SHADOW_CTRL, FREQ_LOAD);
+
+ bcm_phy_write_exp(phydev, SYNC_IN_DIVIDER, 1);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_config_init);
+
+static void bcm_ptp_init(struct bcm_ptp_private *priv)
+{
+ priv->nse_ctrl = NSE_GMODE_EN;
+
+ mutex_init(&priv->mutex);
+ skb_queue_head_init(&priv->tx_queue);
+
+ priv->mii_ts.rxtstamp = bcm_ptp_rxtstamp;
+ priv->mii_ts.txtstamp = bcm_ptp_txtstamp;
+ priv->mii_ts.hwtstamp = bcm_ptp_hwtstamp;
+ priv->mii_ts.ts_info = bcm_ptp_ts_info;
+
+ priv->phydev->mii_ts = &priv->mii_ts;
+}
+
+struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
+{
+ struct bcm_ptp_private *priv;
+ struct ptp_clock *clock;
+
+ switch (BRCM_PHY_MODEL(phydev)) {
+ case PHY_ID_BCM54210E:
+ break;
+ default:
+ return NULL;
+ }
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->ptp_info = bcm_ptp_clock_info;
+
+ snprintf(priv->pin.name, sizeof(priv->pin.name), "SYNC_OUT");
+ priv->ptp_info.pin_config = &priv->pin;
+
+ clock = ptp_clock_register(&priv->ptp_info, &phydev->mdio.dev);
+ if (IS_ERR(clock))
+ return ERR_CAST(clock);
+ priv->ptp_clock = clock;
+
+ priv->phydev = phydev;
+ bcm_ptp_init(priv);
+
+ return priv;
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_probe);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index e36809aa6d30..31fbcdddc9ad 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -27,6 +27,11 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+struct bcm54xx_phy_priv {
+ u64 *stats;
+ struct bcm_ptp_private *ptp;
+};
+
static int bcm54xx_config_clock_delay(struct phy_device *phydev)
{
int rc, val;
@@ -313,6 +318,22 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
}
+static void bcm54xx_ptp_stop(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->ptp)
+ bcm_ptp_stop(priv->ptp);
+}
+
+static void bcm54xx_ptp_config_init(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->ptp)
+ bcm_ptp_config_init(phydev);
+}
+
static int bcm54xx_config_init(struct phy_device *phydev)
{
int reg, err, val;
@@ -390,6 +411,8 @@ static int bcm54xx_config_init(struct phy_device *phydev)
bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
}
+ bcm54xx_ptp_config_init(phydev);
+
return 0;
}
@@ -418,6 +441,8 @@ static int bcm54xx_suspend(struct phy_device *phydev)
{
int ret;
+ bcm54xx_ptp_stop(phydev);
+
/* We cannot use a read/modify/write here otherwise the PHY gets into
* a bad state where its LEDs keep flashing, thus defeating the purpose
* of low power mode.
@@ -741,10 +766,6 @@ static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-struct bcm54xx_phy_priv {
- u64 *stats;
-};
-
static int bcm54xx_phy_probe(struct phy_device *phydev)
{
struct bcm54xx_phy_priv *priv;
@@ -761,6 +782,10 @@ static int bcm54xx_phy_probe(struct phy_device *phydev)
if (!priv->stats)
return -ENOMEM;
+ priv->ptp = bcm_ptp_probe(phydev);
+ if (IS_ERR(priv->ptp))
+ return PTR_ERR(priv->ptp);
+
return 0;
}
@@ -1042,6 +1067,20 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
+ .phy_id = PHY_ID_BCM53128,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM53128",
+ .flags = PHY_IS_INTERNAL,
+ /* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
+ .config_init = bcm54xx_config_init,
+ .config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
@@ -1077,6 +1116,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5241, 0xfffffff0 },
{ PHY_ID_BCM5395, 0xfffffff0 },
{ PHY_ID_BCM53125, 0xfffffff0 },
+ { PHY_ID_BCM53128, 0xfffffff0 },
{ PHY_ID_BCM89610, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index e6ad3a494d32..8549e0e356c9 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -229,9 +229,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
if (misr_status < 0)
return misr_status;
- misr_status |= (DP83822_RX_ERR_HF_INT_EN |
- DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_LINK_STAT_INT_EN |
+ misr_status |= (DP83822_LINK_STAT_INT_EN |
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 1ae792b0daaa..3cd9a77f9532 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -27,6 +27,27 @@
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
+#define DP83TD510E_MSE_DETECT 0xa85
+
+#define DP83TD510_SQI_MAX 7
+
+/* Register values are converted to SNR(dB) as suggested by
+ * "Application Report - DP83TD510E Cable Diagnostics Toolkit":
+ * SNR(dB) = -10 * log10 (VAL/2^17) - 1.76 dB.
+ * SQI ranges are implemented according to "OPEN ALLIANCE - Advanced diagnostic
+ * features for 100BASE-T1 automotive Ethernet PHYs"
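+ *
+ * As a sanity check on the thresholds below (using the formula above):
+ * 0x044c = 1100, and -10 * log10(1100 / 2^17) - 1.76 ~= 20.76 - 1.76
+ * = 19 dB, i.e. the 19 dB boundary.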
+ */
+static const u16 dp83td510_mse_sqi_map[] = {
+ 0x0569, /* < 18dB */
+ 0x044c, /* 18dB =< SNR < 19dB */
+ 0x0369, /* 19dB =< SNR < 20dB */
+ 0x02b6, /* 20dB =< SNR < 21dB */
+ 0x0227, /* 21dB =< SNR < 22dB */
+ 0x01b6, /* 22dB =< SNR < 23dB */
+ 0x015b, /* 23dB =< SNR < 24dB */
+ 0x0000 /* 24dB =< SNR */
+};
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -164,6 +185,32 @@ static int dp83td510_config_aneg(struct phy_device *phydev)
return genphy_c45_check_and_restart_aneg(phydev, changed);
}
+static int dp83td510_get_sqi(struct phy_device *phydev)
+{
+ int sqi, ret;
+ u16 mse_val;
+
+ if (!phydev->link)
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_MSE_DETECT);
+ if (ret < 0)
+ return ret;
+
+ mse_val = 0xFFFF & ret;
+ for (sqi = 0; sqi < ARRAY_SIZE(dp83td510_mse_sqi_map); sqi++) {
+ if (mse_val >= dp83td510_mse_sqi_map[sqi])
+ return sqi;
+ }
+
+ return -EINVAL;
+}
+
+static int dp83td510_get_sqi_max(struct phy_device *phydev)
+{
+ return DP83TD510_SQI_MAX;
+}
+
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on MDIO bus if no RMII clock is enabled.
@@ -192,6 +239,8 @@ static struct phy_driver dp83td510_driver[] = {
.get_features = dp83td510_get_features,
.config_intr = dp83td510_config_intr,
.handle_interrupt = dp83td510_handle_interrupt,
+ .get_sqi = dp83td510_get_sqi,
+ .get_sqi_max = dp83td510_get_sqi_max,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 22139901f01c..e78d0bf69bc3 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -209,6 +209,9 @@
#define PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_ BIT(1)
#define PTP_TSU_INT_STS_PTP_RX_TS_EN_ BIT(0)
+#define LAN8814_LED_CTRL_1 0x0
+#define LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_ BIT(6)
+
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
@@ -308,6 +311,10 @@ struct kszphy_priv {
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
+static const struct kszphy_type lan8814_type = {
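+	/* LAN8814_LED_CTRL_1 is register 0; invert it so the truthiness
+	 * check in kszphy_parse_led_mode() still sees a LED mode register.
+	 */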
+ .led_mode_reg = ~LAN8814_LED_CTRL_1,
+};
+
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
.has_broadcast_disable = true,
@@ -1688,6 +1695,30 @@ static int kszphy_suspend(struct phy_device *phydev)
return genphy_suspend(phydev);
}
+static void kszphy_parse_led_mode(struct phy_device *phydev)
+{
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ const struct device_node *np = phydev->mdio.dev.of_node;
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ if (type && type->led_mode_reg) {
+ ret = of_property_read_u32(np, "micrel,led-mode",
+ &priv->led_mode);
+
+ if (ret)
+ priv->led_mode = -1;
+
+ if (priv->led_mode > 3) {
+ phydev_err(phydev, "invalid led mode: 0x%02x\n",
+ priv->led_mode);
+ priv->led_mode = -1;
+ }
+ } else {
+ priv->led_mode = -1;
+ }
+}
+
static int kszphy_resume(struct phy_device *phydev)
{
int ret;
@@ -1720,7 +1751,6 @@ static int kszphy_probe(struct phy_device *phydev)
const struct device_node *np = phydev->mdio.dev.of_node;
struct kszphy_priv *priv;
struct clk *clk;
- int ret;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1730,20 +1760,7 @@ static int kszphy_probe(struct phy_device *phydev)
priv->type = type;
- if (type && type->led_mode_reg) {
- ret = of_property_read_u32(np, "micrel,led-mode",
- &priv->led_mode);
- if (ret)
- priv->led_mode = -1;
-
- if (priv->led_mode > 3) {
- phydev_err(phydev, "invalid led mode: 0x%02x\n",
- priv->led_mode);
- priv->led_mode = -1;
- }
- } else {
- priv->led_mode = -1;
- }
+ kszphy_parse_led_mode(phydev);
clk = devm_clk_get(&phydev->mdio.dev, "rmii-ref");
/* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
@@ -2815,8 +2832,23 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
return 0;
}
+static void lan8814_setup_led(struct phy_device *phydev, int val)
+{
+ int temp;
+
+ temp = lanphy_read_page_reg(phydev, 5, LAN8814_LED_CTRL_1);
+
+ if (val)
+ temp |= LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
+ else
+ temp &= ~LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
+
+ lanphy_write_page_reg(phydev, 5, LAN8814_LED_CTRL_1, temp);
+}
+
static int lan8814_config_init(struct phy_device *phydev)
{
+ struct kszphy_priv *lan8814 = phydev->priv;
int val;
/* Reset the PHY */
@@ -2835,6 +2867,9 @@ static int lan8814_config_init(struct phy_device *phydev)
val |= LAN8814_ALIGN_TX_A_B_SWAP;
lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+ if (lan8814->led_mode >= 0)
+ lan8814_setup_led(phydev, lan8814->led_mode);
+
return 0;
}
@@ -2855,6 +2890,7 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
static int lan8814_probe(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
struct kszphy_priv *priv;
u16 addr;
int err;
@@ -2863,10 +2899,12 @@ static int lan8814_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
- priv->led_mode = -1;
-
phydev->priv = priv;
+ priv->type = type;
+
+ kszphy_parse_led_mode(phydev);
+
	/* Strap-in value for the PHY address; the register read below gives
	 * the starting PHY address value.
	 */
@@ -3068,6 +3106,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
.config_init = lan8814_config_init,
+ .driver_data = &lan8814_type,
.probe = lan8814_probe,
.soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 6c4da2f9e90a..5b99acf44337 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -8,7 +8,9 @@
#include <linux/module.h>
#include <linux/bitfield.h>
+#include <linux/hwmon.h>
#include <linux/phy.h>
+#include <linux/polynomial.h>
#include <linux/netdevice.h>
/* PHY ID */
@@ -64,6 +66,10 @@
#define VSPEC1_SGMII_ANEN_ANRS (VSPEC1_SGMII_CTRL_ANEN | \
VSPEC1_SGMII_CTRL_ANRS)
+/* Temperature sensor */
+#define VPSPEC1_TEMP_STA 0x0E
+#define VPSPEC1_TEMP_STA_DATA GENMASK(9, 0)
+
/* WoL */
#define VPSPEC2_WOL_CTL 0x0E06
#define VPSPEC2_WOL_AD01 0x0E08
@@ -80,6 +86,102 @@ static const struct {
{9, 0x73},
};
+#if IS_ENABLED(CONFIG_HWMON)
+/* The original translation formulae of the temperature (in degrees of Celsius)
+ * are as follows:
+ *
+ * T = -2.5761e-11*(N^4) + 9.7332e-8*(N^3) + -1.9165e-4*(N^2) +
+ * 3.0762e-1*(N^1) + -5.2156e1
+ *
+ * where T = [-52.156, 137.961]C and N = [0, 1023].
+ *
+ * They must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are ordered so that the intermediate results
+ * stay within the integer limits. In addition we need to translate the
+ * formulae to produce millidegrees of Celsius. Here is what it looks like after
+ * the alterations:
+ *
+ * T = -25761e-12*(N^4) + 97332e-9*(N^3) + -191650e-6*(N^2) +
+ * 307620e-3*(N^1) + -52156
+ *
+ * where T = [-52156, 137961]mC and N = [0, 1023].
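+ *
+ * Sanity check: at N = 0 every positive-degree term vanishes and the
+ * polynomial reduces to its constant term, -52156 mC, matching the
+ * lower bound above.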
+ */
+static const struct polynomial poly_N_to_temp = {
+ .terms = {
+ {4, -25761, 1000, 1},
+ {3, 97332, 1000, 1},
+ {2, -191650, 1000, 1},
+ {1, 307620, 1000, 1},
+ {0, -52156, 1, 1}
+ }
+};
+
+static int gpy_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VPSPEC1_TEMP_STA);
+ if (ret < 0)
+ return ret;
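+	/* a reading of zero indicates no valid sample is available yet */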
+ if (!ret)
+ return -ENODATA;
+
+ *value = polynomial_calc(&poly_N_to_temp,
+ FIELD_GET(VPSPEC1_TEMP_STA_DATA, ret));
+
+ return 0;
+}
+
+static umode_t gpy_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static const struct hwmon_channel_info *gpy_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops gpy_hwmon_hwmon_ops = {
+ .is_visible = gpy_hwmon_is_visible,
+ .read = gpy_hwmon_read,
+};
+
+static const struct hwmon_chip_info gpy_hwmon_chip_info = {
+ .ops = &gpy_hwmon_hwmon_ops,
+ .info = gpy_hwmon_info,
+};
+
+static int gpy_hwmon_register(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon_dev;
+ char *hwmon_name;
+
+ hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(hwmon_name))
+ return PTR_ERR(hwmon_name);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
+ phydev,
+ &gpy_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+#else
+static int gpy_hwmon_register(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
static int gpy_config_init(struct phy_device *phydev)
{
int ret;
@@ -109,6 +211,10 @@ static int gpy_probe(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = gpy_hwmon_register(phydev);
+ if (ret)
+ return ret;
+
phydev_info(phydev, "Firmware Version: 0x%04X (%s)\n", ret,
(ret & PHY_FWV_REL_MASK) ? "release" : "test");
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 9944cc501806..2a8195c50d14 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -444,15 +444,10 @@ static int tja11xx_hwmon_register(struct phy_device *phydev,
struct tja11xx_priv *priv)
{
struct device *dev = &phydev->mdio.dev;
- int i;
-
- priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!priv->hwmon_name)
- return -ENOMEM;
- for (i = 0; priv->hwmon_name[i]; i++)
- if (hwmon_is_bad_char(priv->hwmon_name[i]))
- priv->hwmon_name[i] = '_';
+ priv->hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(priv->hwmon_name))
+ return PTR_ERR(priv->hwmon_name);
priv->hwmon_dev =
devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
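
Both this hunk and the mxl-gpy one above lean on the new sanitize helpers; the loop deleted here is essentially what they factor out. A sketch of the non-devm variant under that assumption (sanitize_name_sketch is a hypothetical name):

/* Sketch of what hwmon_sanitize_name() factors out of drivers, per the
 * loop removed above: duplicate the name, then replace every character
 * that hwmon_is_bad_char() rejects ('-', '*', ' ', '\t', '\n') with '_'.
 */
static char *sanitize_name_sketch(const char *name)
{
	char *s = kstrdup(name, GFP_KERNEL);
	int i;

	if (!s)
		return ERR_PTR(-ENOMEM);

	for (i = 0; s[i]; i++)
		if (hwmon_is_bad_char(s[i]))
			s[i] = '_';

	return s;
}
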
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef62f357b76d..8d3ee3a6495b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <linux/suspend.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
@@ -976,6 +977,28 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
struct phy_driver *drv = phydev->drv;
irqreturn_t ret;
+ /* Wakeup interrupts may occur during a system sleep transition.
+ * Postpone handling until the PHY has resumed.
+ */
+ if (IS_ENABLED(CONFIG_PM_SLEEP) && phydev->irq_suspended) {
+ struct net_device *netdev = phydev->attached_dev;
+
+ if (netdev) {
+ struct device *parent = netdev->dev.parent;
+
+ if (netdev->wol_enabled)
+ pm_system_wakeup();
+ else if (device_may_wakeup(&netdev->dev))
+ pm_wakeup_dev_event(&netdev->dev, 0, true);
+ else if (parent && device_may_wakeup(parent))
+ pm_wakeup_dev_event(parent, 0, true);
+ }
+
+ phydev->irq_rerun = 1;
+ disable_irq_nosync(irq);
+ return IRQ_HANDLED;
+ }
+
mutex_lock(&phydev->lock);
ret = drv->handle_interrupt(phydev);
mutex_unlock(&phydev->lock);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7885bceff773..a74b320f5b27 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -278,6 +278,15 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
if (phydev->mac_managed_pm)
return 0;
+ /* Wakeup interrupts may occur during the system sleep transition when
+ * the PHY is inaccessible. Set flag to postpone handling until the PHY
+ * has resumed. Wait for concurrent interrupt handler to complete.
+ */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->irq_suspended = 1;
+ synchronize_irq(phydev->irq);
+ }
+
/* We must stop the state machine manually, otherwise it stops out of
* control, possibly with the phydev->lock held. Upon resume, netdev
* may call phy routines that try to grab the same lock, and that may
@@ -315,6 +324,20 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
if (ret < 0)
return ret;
no_resume:
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->irq_suspended = 0;
+ synchronize_irq(phydev->irq);
+
+ /* Rerun interrupts which were postponed by phy_interrupt()
+ * because they occurred during the system sleep transition.
+ */
+ if (phydev->irq_rerun) {
+ phydev->irq_rerun = 0;
+ enable_irq(phydev->irq);
+ irq_wake_thread(phydev->irq, phydev);
+ }
+ }
+
if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev);
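
Read together with the phy.c hunk above, the two paths form a small handshake around the PHY IRQ; a condensed timeline, using only the field names from the hunks:

/* Suspend/resume vs. phy_interrupt(), as wired up above:
 *
 * mdio_bus_phy_suspend():
 *	phydev->irq_suspended = 1;
 *	synchronize_irq(phydev->irq);	// let a running handler finish
 *
 * phy_interrupt() while suspended:
 *	report wakeup (pm_system_wakeup() / pm_wakeup_dev_event());
 *	phydev->irq_rerun = 1;
 *	disable_irq_nosync(irq);	// PHY may be inaccessible
 *
 * mdio_bus_phy_resume():
 *	phydev->irq_suspended = 0;
 *	synchronize_irq(phydev->irq);
 *	if (phydev->irq_rerun)		// replay the postponed IRQ
 *		enable_irq() + irq_wake_thread();
 */
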
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 066684b80919..9bd69328dc4d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -43,7 +43,6 @@ struct phylink {
/* private: */
struct net_device *netdev;
const struct phylink_mac_ops *mac_ops;
- const struct phylink_pcs_ops *pcs_ops;
struct phylink_config *config;
struct phylink_pcs *pcs;
struct device *dev;
@@ -759,6 +758,18 @@ static void phylink_resolve_flow(struct phylink_link_state *state)
}
}
+static void phylink_pcs_poll_stop(struct phylink *pl)
+{
+ if (pl->cfg_link_an_mode == MLO_AN_INBAND)
+ del_timer(&pl->link_poll);
+}
+
+static void phylink_pcs_poll_start(struct phylink *pl)
+{
+ if (pl->pcs && pl->pcs->poll && pl->cfg_link_an_mode == MLO_AN_INBAND)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+}
+
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
@@ -779,8 +790,8 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl)
if (pl->link_config.an_enabled &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
phylink_autoneg_inband(pl->cur_link_an_mode)) {
- if (pl->pcs_ops)
- pl->pcs_ops->pcs_an_restart(pl->pcs);
+ if (pl->pcs)
+ pl->pcs->ops->pcs_an_restart(pl->pcs);
else if (pl->config->legacy_pre_march2020)
pl->mac_ops->mac_an_restart(pl->config);
}
@@ -790,6 +801,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
const struct phylink_link_state *state)
{
struct phylink_pcs *pcs = NULL;
+ bool pcs_changed = false;
int err;
phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
@@ -802,8 +814,12 @@ static void phylink_major_config(struct phylink *pl, bool restart,
pcs);
return;
}
+
+ pcs_changed = pcs && pl->pcs != pcs;
}
+ phylink_pcs_poll_stop(pl);
+
if (pl->mac_ops->mac_prepare) {
err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
state->interface);
@@ -817,27 +833,17 @@ static void phylink_major_config(struct phylink *pl, bool restart,
/* If we have a new PCS, switch to the new PCS after preparing the MAC
* for the change.
*/
- if (pcs) {
+ if (pcs_changed)
pl->pcs = pcs;
- pl->pcs_ops = pcs->ops;
-
- if (!pl->phylink_disable_state &&
- pl->cfg_link_an_mode == MLO_AN_INBAND) {
- if (pcs->poll)
- mod_timer(&pl->link_poll, jiffies + HZ);
- else
- del_timer(&pl->link_poll);
- }
- }
phylink_mac_config(pl, state);
- if (pl->pcs_ops) {
- err = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
- state->interface,
- state->advertising,
- !!(pl->link_config.pause &
- MLO_PAUSE_AN));
+ if (pl->pcs) {
+ err = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+ state->interface,
+ state->advertising,
+ !!(pl->link_config.pause &
+ MLO_PAUSE_AN));
if (err < 0)
phylink_err(pl, "pcs_config failed: %pe\n",
ERR_PTR(err));
@@ -854,6 +860,8 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
}
+
+ phylink_pcs_poll_start(pl);
}
/*
@@ -869,7 +877,7 @@ static int phylink_change_inband_advert(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return 0;
- if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
+ if (!pl->pcs && pl->config->legacy_pre_march2020) {
/* Legacy method */
phylink_mac_config(pl, &pl->link_config);
phylink_mac_pcs_an_restart(pl);
@@ -886,10 +894,11 @@ static int phylink_change_inband_advert(struct phylink *pl)
* restart negotiation if the pcs_config() helper indicates that
* the programmed advertisement has changed.
*/
- ret = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
- pl->link_config.interface,
- pl->link_config.advertising,
- !!(pl->link_config.pause & MLO_PAUSE_AN));
+ ret = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+ pl->link_config.interface,
+ pl->link_config.advertising,
+ !!(pl->link_config.pause &
+ MLO_PAUSE_AN));
if (ret < 0)
return ret;
@@ -918,8 +927,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->an_complete = 0;
state->link = 1;
- if (pl->pcs_ops)
- pl->pcs_ops->pcs_get_state(pl->pcs, state);
+ if (pl->pcs)
+ pl->pcs->ops->pcs_get_state(pl->pcs, state);
else if (pl->mac_ops->mac_pcs_get_state &&
pl->config->legacy_pre_march2020)
pl->mac_ops->mac_pcs_get_state(pl->config, state);
@@ -992,8 +1001,8 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- if (pl->pcs_ops && pl->pcs_ops->pcs_link_up)
- pl->pcs_ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
+ if (pl->pcs && pl->pcs->ops->pcs_link_up)
+ pl->pcs->ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
pl->cur_interface,
link_state.speed, link_state.duplex);
@@ -1115,7 +1124,7 @@ static void phylink_resolve(struct work_struct *w)
}
phylink_major_config(pl, false, &link_state);
pl->link_config.interface = link_state.interface;
- } else if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
+ } else if (!pl->pcs && pl->config->legacy_pre_march2020) {
/* The interface remains unchanged, only the speed,
* duplex or pause settings have changed. Call the
* old mac_config() method to configure the MAC/PCS
@@ -2991,6 +3000,7 @@ int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
adv |= ADVERTISE_1000XPSE_ASYM;
return adv;
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
return 0x0001;
default:
/* Nothing to do for other modes */
@@ -3030,7 +3040,9 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
/* Ensure ISOLATE bit is disabled */
if (mode == MLO_AN_INBAND &&
- linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising))
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)))
bmcr = BMCR_ANENABLE;
else
bmcr = 0;
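
Dropping the cached pcs_ops pointer works because every phylink_pcs carries its ops with it, so pl->pcs->ops is always valid whenever pl->pcs is set. A hypothetical driver-side sketch of handing such a PCS to phylink through mac_select_pcs(); all my_* names are made up for illustration:

#include <linux/phylink.h>

struct my_priv {
	struct phylink_config phylink_config;
	struct phylink_pcs pcs;
};

static void my_pcs_get_state(struct phylink_pcs *pcs,
			     struct phylink_link_state *state) { }
static int my_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			 phy_interface_t interface,
			 const unsigned long *advertising,
			 bool permit_pause_to_mac) { return 0; }
static void my_pcs_an_restart(struct phylink_pcs *pcs) { }

static const struct phylink_pcs_ops my_pcs_ops = {
	.pcs_get_state	= my_pcs_get_state,
	.pcs_config	= my_pcs_config,
	.pcs_an_restart	= my_pcs_an_restart,
};

static struct phylink_pcs *my_mac_select_pcs(struct phylink_config *config,
					     phy_interface_t interface)
{
	struct my_priv *priv = container_of(config, struct my_priv,
					    phylink_config);

	priv->pcs.ops = &my_pcs_ops;
	priv->pcs.poll = true;	/* gated by phylink_pcs_poll_start() above */
	return &priv->pcs;
}
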
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 9a5d5a10560f..63f90fe9a4d2 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1290,7 +1290,7 @@ static const struct hwmon_chip_info sfp_hwmon_chip_info = {
static void sfp_hwmon_probe(struct work_struct *work)
{
struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
- int err, i;
+ int err;
/* hwmon interface needs to access 16bit registers in atomic way to
* guarantee coherency of the diagnostic monitoring data. If it is not
@@ -1318,16 +1318,12 @@ static void sfp_hwmon_probe(struct work_struct *work)
return;
}
- sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
- if (!sfp->hwmon_name) {
+ sfp->hwmon_name = hwmon_sanitize_name(dev_name(sfp->dev));
+ if (IS_ERR(sfp->hwmon_name)) {
dev_err(sfp->dev, "out of memory for hwmon name\n");
return;
}
- for (i = 0; sfp->hwmon_name[i]; i++)
- if (hwmon_is_bad_char(sfp->hwmon_name[i]))
- sfp->hwmon_name[i] = '_';
-
sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev,
sfp->hwmon_name, sfp,
&sfp_hwmon_chip_info,
@@ -2516,7 +2512,7 @@ static int sfp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
- err = devm_add_action(sfp->dev, sfp_cleanup, sfp);
+ err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp);
if (err < 0)
return err;
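
The one-liner above closes a leak: devm_add_action() can itself fail with -ENOMEM, leaving sfp_cleanup() never registered. A sketch of the difference, under the documented devm semantics:

/* devm_add_action() leaves the resource dangling if registering the
 * action fails; devm_add_action_or_reset() runs the action immediately
 * in that case, so the caller's error path stays trivial:
 *
 *	err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp);
 *	if (err < 0)
 *		return err;	// sfp_cleanup(sfp) has already run
 */
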
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 1b54684b68a0..69423b8965b3 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -110,7 +110,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
struct smsc_phy_priv *priv = phydev->priv;
int rc;
- if (!priv->energy_enable)
+ if (!priv->energy_enable || phydev->irq != PHY_POLL)
return 0;
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -121,10 +121,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
/* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
-
- return smsc_phy_ack_interrupt(phydev);
+ return rc;
}
static int smsc_phy_reset(struct phy_device *phydev)
@@ -146,11 +143,6 @@ static int smsc_phy_reset(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
-static int lan911x_config_init(struct phy_device *phydev)
-{
- return smsc_phy_ack_interrupt(phydev);
-}
-
static int lan87xx_config_aneg(struct phy_device *phydev)
{
int rc;
@@ -210,6 +202,8 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
* response on link pulses to detect presence of plugged Ethernet cable.
* The Energy Detect Power-Down mode is enabled again in the end of procedure to
* save approximately 220 mW of power if cable is unplugged.
+ * The workaround is only applicable to poll mode. Energy Detect Power-Down may
+ * not be used in interrupt mode lest link change detection become unreliable.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
@@ -217,7 +211,7 @@ static int lan87xx_read_status(struct phy_device *phydev)
int err = genphy_read_status(phydev);
- if (!phydev->link && priv->energy_enable) {
+ if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
/* Disable EDPD to wake up PHY */
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
@@ -418,9 +412,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
- /* basic functions */
- .config_init = lan911x_config_init,
-
/* IRQ related */
.config_intr = smsc_phy_config_intr,
.handle_interrupt = smsc_phy_handle_interrupt,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 87a635aac008..259b2b84b2b3 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -273,6 +273,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
}
}
+static void tun_napi_enable(struct tun_file *tfile)
+{
+ if (tfile->napi_enabled)
+ napi_enable(&tfile->napi);
+}
+
static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
@@ -634,7 +640,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
- tun_napi_disable(tfile);
+ if (!tfile->detached)
+ tun_napi_disable(tfile);
tun_napi_del(tfile);
}
@@ -653,8 +660,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
if (clean) {
RCU_INIT_POINTER(tfile->tun, NULL);
sock_put(&tfile->sk);
- } else
+ } else {
tun_disable_queue(tun, tfile);
+ tun_napi_disable(tfile);
+ }
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
@@ -727,6 +736,7 @@ static void tun_detach_all(struct net_device *dev)
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_napi_del(tfile);
tun_enable_queue(tfile);
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -807,6 +817,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (tfile->detached) {
tun_enable_queue(tfile);
+ tun_napi_enable(tfile);
} else {
sock_hold(&tfile->sk);
tun_napi_init(tun, tfile, napi, napi_frags);
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 2c81236c6c7c..21c1ca275cc4 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -126,8 +126,7 @@
AX_MEDIUM_RE)
#define AX88772_MEDIUM_DEFAULT \
- (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
- AX_MEDIUM_TFC | AX_MEDIUM_PS | \
+ (AX_MEDIUM_FD | AX_MEDIUM_PS | \
AX_MEDIUM_AC | AX_MEDIUM_RE)
/* AX88772 & AX88178 RX_CTL values */
@@ -213,9 +212,6 @@ void asix_rx_fixup_common_free(struct asix_common_private *dp);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
-int asix_set_sw_mii(struct usbnet *dev, int in_pm);
-int asix_set_hw_mii(struct usbnet *dev, int in_pm);
-
int asix_read_phy_addr(struct usbnet *dev, bool internal);
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 632fa6c1d5e3..9ea91c3ff045 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -68,6 +68,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static int asix_set_sw_mii(struct usbnet *dev, int in_pm)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
+
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static int asix_set_hw_mii(struct usbnet *dev, int in_pm)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
static int asix_check_host_enable(struct usbnet *dev, int in_pm)
{
int i, ret;
@@ -297,25 +318,6 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return skb;
}
-int asix_set_sw_mii(struct usbnet *dev, int in_pm)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
-
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable software MII access\n");
- return ret;
-}
-
-int asix_set_hw_mii(struct usbnet *dev, int in_pm)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable hardware MII access\n");
- return ret;
-}
-
int asix_read_phy_addr(struct usbnet *dev, bool internal)
{
int ret, offset;
@@ -431,6 +433,7 @@ void asix_adjust_link(struct net_device *netdev)
asix_write_medium_mode(dev, mode, 0);
phy_print_status(phydev);
+ usbnet_link_change(dev, phydev->link, 0);
}
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 4704ed6f00ef..ac2d400d1d6c 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1472,6 +1472,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* are bundled into this buffer and where we can find an array of
* per-packet metadata (which contains elements encoded into u16).
*/
+
+ /* SKB contents for current firmware:
+ * <packet 1> <padding>
+ * ...
+ * <packet N> <padding>
+ * <per-packet metadata entry 1> <dummy header>
+ * ...
+ * <per-packet metadata entry N> <dummy header>
+ * <padding2> <rx_hdr>
+ *
+ * where:
+ * <packet N> contains pkt_len bytes:
+ * 2 bytes of IP alignment pseudo header
+ * packet received
+ * <per-packet metadata entry N> contains 4 bytes:
+ * pkt_len and fields AX_RXHDR_*
+ * <padding> 0-7 bytes to terminate at an
+ * 8-byte (64-bit) boundary.
+ * <padding2> 4 bytes to make rx_hdr terminate at an
+ * 8-byte (64-bit) boundary.
+ * <dummy header> contains 4 bytes:
+ * pkt_len=0 and AX_RXHDR_DROP_ERR
+ * <rx_hdr> contains 4 bytes:
+ * pkt_cnt and hdr_off (offset of
+ * <per-packet metadata entry 1>)
+ *
+ * pkt_cnt is the number of entries in the per-packet metadata.
+ * In current firmware there are two entries per packet:
+ * the first points to the packet and the
+ * second is a dummy header.
+ * This was probably done to align fields to 64 bits and
+ * to maintain compatibility with old firmware.
+ * This code assumes that <dummy header> and <padding2> are
+ * optional.
+ */
+
if (skb->len < 4)
return 0;
skb_trim(skb, skb->len - 4);
@@ -1485,51 +1521,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Make sure that the bounds of the metadata array are inside the SKB
* (and in front of the counter at the end).
*/
- if (pkt_cnt * 2 + hdr_off > skb->len)
+ if (pkt_cnt * 4 + hdr_off > skb->len)
return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
/* Packets must not overlap the metadata array */
skb_trim(skb, hdr_off);
- for (; ; pkt_cnt--, pkt_hdr++) {
+ for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
+ u16 pkt_len_plus_padd;
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+ pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
- if (pkt_len > skb->len)
+ /* Skip dummy header used for alignment */
+ if (pkt_len == 0)
+ continue;
+
+ if (pkt_len_plus_padd > skb->len)
return 0;
/* Check CRC or runt packet */
- if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
- pkt_len >= 2 + ETH_HLEN) {
- bool last = (pkt_cnt == 0);
-
- if (last) {
- ax_skb = skb;
- } else {
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (!ax_skb)
- return 0;
- }
- ax_skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
- skb_set_tail_pointer(ax_skb, ax_skb->len);
- ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(ax_skb, pkt_hdr);
+ if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
+ pkt_len < 2 + ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ skb_pull(skb, pkt_len_plus_padd);
+ continue;
+ }
- if (last)
- return 1;
+ /* last packet */
+ if (pkt_len_plus_padd == skb->len) {
+ skb_trim(skb, pkt_len);
- usbnet_skb_return(dev, ax_skb);
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+
+ skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+ ax88179_rx_checksum(skb, pkt_hdr);
+ return 1;
}
- /* Trim this packet away from the SKB */
- if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ax_skb)
return 0;
+ skb_trim(ax_skb, pkt_len);
+
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+
+ ax_skb->truesize = pkt_len_plus_padd +
+ SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
+ usbnet_skb_return(dev, ax_skb);
+
+ skb_pull(skb, pkt_len_plus_padd);
}
+
+ return 0;
}
static struct sk_buff *
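
For reference, the alignment math used throughout the rewritten loop above, restated as a hypothetical helper:

/* Round a packet length up to the next 64-bit boundary, as done inline
 * above with (pkt_len + 7) & 0xfff8; e.g. 60 -> 64, 64 -> 64.
 */
static inline u16 rx_pkt_len_plus_padd(u16 pkt_len)
{
	return (pkt_len + 7) & 0xfff8;
}

Each metadata entry is a u32, hence the pkt_cnt * 4 + hdr_off bounds check at the top of the loop.
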
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index e7fe9c0f63a9..1e5c15363a2e 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -280,7 +280,7 @@ static void catc_irq_done(struct urb *urb)
struct catc *catc = urb->context;
u8 *data = urb->transfer_buffer;
int status = urb->status;
- unsigned int hasdata = 0, linksts = LinkNoChange;
+ unsigned int hasdata, linksts = LinkNoChange;
int res;
if (!catc->is_f5u011) {
@@ -781,7 +781,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
intf->altsetting->desc.bInterfaceNumber, 1)) {
dev_err(dev, "Can't set altsetting 1.\n");
ret = -EIO;
- goto fail_mem;;
+ goto fail_mem;
}
netdev = alloc_etherdev(sizeof(struct catc));
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 359ea0d10e59..baa9b14b1644 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -218,7 +218,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(!skb2))
goto next;
skb_trim(skb2, len);
- put_unaligned_le16(BIT(15) | (1 << 11) | len,
+ put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
eem_linkcmd(dev, skb2);
break;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 35110814ba22..bfb58c91db04 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -71,22 +71,22 @@ struct smsc95xx_priv {
struct fwnode_handle *irqfwnode;
struct mii_bus *mdiobus;
struct phy_device *phydev;
+ struct task_struct *pm_task;
};
static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data, int in_pm)
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
- BUG_ON(!dev);
-
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_read_cmd;
else
fn = usbnet_read_cmd_nopm;
@@ -107,16 +107,15 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
return ret;
}
-static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data, int in_pm)
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
- BUG_ON(!dev);
-
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_write_cmd;
else
fn = usbnet_write_cmd_nopm;
@@ -134,41 +133,16 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
return ret;
}
-static int __must_check smsc95xx_read_reg_nopm(struct usbnet *dev, u32 index,
- u32 *data)
-{
- return __smsc95xx_read_reg(dev, index, data, 1);
-}
-
-static int __must_check smsc95xx_write_reg_nopm(struct usbnet *dev, u32 index,
- u32 data)
-{
- return __smsc95xx_write_reg(dev, index, data, 1);
-}
-
-static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data)
-{
- return __smsc95xx_read_reg(dev, index, data, 0);
-}
-
-static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data)
-{
- return __smsc95xx_write_reg(dev, index, data, 0);
-}
-
/* Loop until the read is completed with timeout
* called with phy_mutex held */
-static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
- int in_pm)
+static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
{
unsigned long start_time = jiffies;
u32 val;
int ret;
do {
- ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
+ ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
if (ret < 0) {
/* Ignore -ENODEV error during disconnect() */
if (ret == -ENODEV)
@@ -189,8 +163,7 @@ static u32 mii_address_cmd(int phy_id, int idx, u16 op)
return (phy_id & 0x1f) << 11 | (idx & 0x1f) << 6 | op;
}
-static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
- int in_pm)
+static int smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx)
{
u32 val, addr;
int ret;
@@ -198,7 +171,7 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
@@ -206,20 +179,20 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
/* set the address, index & direction (read from PHY) */
addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
- ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
goto done;
}
- ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
+ ret = smsc95xx_read_reg(dev, MII_DATA, &val);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error reading MII_DATA\n");
@@ -237,8 +210,8 @@ done:
return ret;
}
-static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
- int idx, int regval, int in_pm)
+static void smsc95xx_mdio_write(struct usbnet *dev, int phy_id, int idx,
+ int regval)
{
u32 val, addr;
int ret;
@@ -246,14 +219,14 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
val = regval;
- ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_DATA, val);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error writing MII_DATA\n");
@@ -262,14 +235,14 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
/* set the address, index & direction (write to PHY) */
addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
- ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
goto done;
@@ -279,25 +252,11 @@ done:
mutex_unlock(&dev->phy_mutex);
}
-static int smsc95xx_mdio_read_nopm(struct usbnet *dev, int idx)
-{
- struct smsc95xx_priv *pdata = dev->driver_priv;
-
- return __smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, idx, 1);
-}
-
-static void smsc95xx_mdio_write_nopm(struct usbnet *dev, int idx, int regval)
-{
- struct smsc95xx_priv *pdata = dev->driver_priv;
-
- __smsc95xx_mdio_write(dev, pdata->phydev->mdio.addr, idx, regval, 1);
-}
-
static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
struct usbnet *dev = bus->priv;
- return __smsc95xx_mdio_read(dev, phy_id, idx, 0);
+ return smsc95xx_mdio_read(dev, phy_id, idx);
}
static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
@@ -305,7 +264,7 @@ static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
{
struct usbnet *dev = bus->priv;
- __smsc95xx_mdio_write(dev, phy_id, idx, regval, 0);
+ smsc95xx_mdio_write(dev, phy_id, idx, regval);
return 0;
}
@@ -865,7 +824,7 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
}
/* Starts the Receive path */
-static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
+static int smsc95xx_start_rx_path(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
@@ -874,7 +833,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
+ return smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
}
static int smsc95xx_reset(struct usbnet *dev)
@@ -1057,7 +1016,7 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
}
- ret = smsc95xx_start_rx_path(dev, 0);
+ ret = smsc95xx_start_rx_path(dev);
if (ret < 0) {
netdev_warn(dev->net, "Failed to start RX path\n");
return ret;
@@ -1291,16 +1250,17 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
return crc << ((filter % 2) * 16);
}
-static int smsc95xx_link_ok_nopm(struct usbnet *dev)
+static int smsc95xx_link_ok(struct usbnet *dev)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
/* first, a dummy read, needed to latch some MII phys */
- ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
+ ret = smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, MII_BMSR);
if (ret < 0)
return ret;
- ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
+ ret = smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, MII_BMSR);
if (ret < 0)
return ret;
@@ -1313,14 +1273,14 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
val |= PM_CTL_SUS_MODE_0;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1332,12 +1292,12 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
if (pdata->wolopts & WAKE_PHY)
val |= PM_CTL_WUPS_ED_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
/* read back PM_CTRL */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
@@ -1349,34 +1309,34 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
static int smsc95xx_enter_suspend1(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
+ int ret, phy_id = pdata->phydev->mdio.addr;
u32 val;
- int ret;
/* reconfigure link pulse detection timing for
* compatibility with non-standard link partners
*/
if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
- smsc95xx_mdio_write_nopm(dev, PHY_EDPD_CONFIG,
- PHY_EDPD_CONFIG_DEFAULT);
+ smsc95xx_mdio_write(dev, phy_id, PHY_EDPD_CONFIG,
+ PHY_EDPD_CONFIG_DEFAULT);
/* enable energy detect power-down mode */
- ret = smsc95xx_mdio_read_nopm(dev, PHY_MODE_CTRL_STS);
+ ret = smsc95xx_mdio_read(dev, phy_id, PHY_MODE_CTRL_STS);
if (ret < 0)
return ret;
ret |= MODE_CTRL_STS_EDPWRDOWN_;
- smsc95xx_mdio_write_nopm(dev, PHY_MODE_CTRL_STS, ret);
+ smsc95xx_mdio_write(dev, phy_id, PHY_MODE_CTRL_STS, ret);
/* enter SUSPEND1 mode */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_1;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1384,7 +1344,7 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
val &= ~PM_CTL_WUPS_;
val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1399,14 +1359,14 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_2;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1421,7 +1381,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, RX_FIFO_INF, &val);
+ ret = smsc95xx_read_reg(dev, RX_FIFO_INF, &val);
if (ret < 0)
return ret;
@@ -1430,14 +1390,14 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
return -EBUSY;
}
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_3 | PM_CTL_RES_CLR_WKP_STS;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1445,7 +1405,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
val &= ~PM_CTL_WUPS_;
val |= PM_CTL_WUPS_WOL_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1490,9 +1450,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
u32 val, link_up;
int ret;
+ pdata->pm_task = current;
+
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
+ pdata->pm_task = NULL;
return ret;
}
@@ -1501,8 +1464,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
pdata->suspend_flags = 0;
}
- /* determine if link is up using only _nopm functions */
- link_up = smsc95xx_link_ok_nopm(dev);
+ link_up = smsc95xx_link_ok(dev);
if (message.event == PM_EVENT_AUTO_SUSPEND &&
(pdata->features & FEATURE_REMOTE_WAKEUP)) {
@@ -1519,23 +1481,23 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
netdev_info(dev->net, "entering SUSPEND2 mode\n");
/* disable energy detect (link up) & wake up events */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
goto done;
val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
goto done;
@@ -1626,7 +1588,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
}
for (i = 0; i < (wuff_filter_count * 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, filter_mask[i]);
if (ret < 0) {
kfree(filter_mask);
goto done;
@@ -1635,50 +1597,50 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
kfree(filter_mask);
for (i = 0; i < (wuff_filter_count / 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, command[i]);
if (ret < 0)
goto done;
}
for (i = 0; i < (wuff_filter_count / 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, offset[i]);
if (ret < 0)
goto done;
}
for (i = 0; i < (wuff_filter_count / 2); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, crc[i]);
if (ret < 0)
goto done;
}
/* clear any pending pattern match packet status */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val |= WUCSR_WUFR_;
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
}
if (pdata->wolopts & WAKE_MAGIC) {
/* clear any pending magic packet status */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val |= WUCSR_MPR_;
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
}
/* enable/disable wakeup sources */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
@@ -1698,12 +1660,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
val &= ~WUCSR_MPEN_;
}
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
/* enable wol wakeup source */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
goto done;
@@ -1713,12 +1675,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
if (pdata->wolopts & WAKE_PHY)
val |= PM_CTL_ED_EN_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
goto done;
/* enable receiver to enable frame reception */
- smsc95xx_start_rx_path(dev, 1);
+ smsc95xx_start_rx_path(dev);
/* some wol options are enabled, so enter SUSPEND0 */
netdev_info(dev->net, "entering SUSPEND0 mode\n");
@@ -1732,6 +1694,7 @@ done:
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+ pdata->pm_task = NULL;
return ret;
}
@@ -1752,29 +1715,31 @@ static int smsc95xx_resume(struct usb_interface *intf)
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ pdata->pm_task = current;
+
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
- return ret;
+ goto done;
/* clear wake-up status */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
- return ret;
+ goto done;
}
phy_init_hw(pdata->phydev);
@@ -1783,15 +1748,20 @@ static int smsc95xx_resume(struct usb_interface *intf)
if (ret < 0)
netdev_warn(dev->net, "usbnet_resume error\n");
+done:
+ pdata->pm_task = NULL;
return ret;
}
static int smsc95xx_reset_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
+ pdata->pm_task = current;
ret = smsc95xx_reset(dev);
+ pdata->pm_task = NULL;
if (ret < 0)
return ret;
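
The refactoring above replaces the in_pm flag threaded through every accessor with a single rule: use the _nopm usbnet primitives if and only if the caller is the task currently executing suspend/resume. A condensed sketch of the pattern, using only names from the hunks:

/* The pm_task pattern introduced above:
 *
 * smsc95xx_suspend() / smsc95xx_resume() / smsc95xx_reset_resume():
 *	pdata->pm_task = current;
 *	... register accesses ...
 *	pdata->pm_task = NULL;
 *
 * smsc95xx_read_reg() / smsc95xx_write_reg():
 *	fn = (current == pdata->pm_task) ? usbnet_read_cmd_nopm
 *					 : usbnet_read_cmd;
 *
 * One accessor thus serves both contexts, which is what lets the whole
 * family of _nopm wrappers above be deleted.
 */
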
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dc79811535c2..e415465068ca 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -17,9 +17,6 @@
* issues can usefully be addressed by this framework.
*/
-// #define DEBUG // error path messages, extra info
-// #define VERBOSE // more; success messages
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -849,13 +846,11 @@ int usbnet_stop (struct net_device *net)
mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
- /* deferred work (task, timer, softirq) must also stop.
- * can't flush_scheduled_work() until we drop rtnl (later),
- * else workers could deadlock; so make workers a NOP.
- */
+ /* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ cancel_work_sync(&dev->kevent);
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1619,8 +1614,6 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- cancel_work_sync(&dev->kevent);
-
usb_scuttle_anchored_urbs(&dev->deferred);
if (dev->driver_info->unbind)
@@ -2004,7 +1997,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (size) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmalloc(size, GFP_NOIO);
if (!buf)
goto out;
}
@@ -2036,7 +2029,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_NOIO);
if (!buf)
goto out;
} else {
@@ -2137,7 +2130,7 @@ static void usbnet_async_cmd_cb(struct urb *urb)
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
u16 value, u16 index, const void *data, u16 size)
{
- struct usb_ctrlrequest *req = NULL;
+ struct usb_ctrlrequest *req;
struct urb *urb;
int err = -ENOMEM;
void *buf = NULL;
@@ -2155,7 +2148,7 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
if (!buf) {
netdev_err(dev->net, "Error allocating buffer"
" in %s!\n", __func__);
- goto fail_free;
+ goto fail_free_urb;
}
}
@@ -2179,14 +2172,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
if (err < 0) {
netdev_err(dev->net, "Error submitting the control"
" message: status=%d\n", err);
- goto fail_free;
+ goto fail_free_all;
}
return 0;
+fail_free_all:
+ kfree(req);
fail_free_buf:
kfree(buf);
-fail_free:
- kfree(req);
+ /*
+ * Avoid a double free: URB_FREE_BUFFER can only be set after
+ * the URB has been filled, and freeing the URB with the flag
+ * still set would kfree() the already-freed buffer again.
+ */
+ urb->transfer_flags = 0;
+fail_free_urb:
usb_free_urb(urb);
fail:
return err;
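
The relabelled error path matters because usbnet_write_cmd_async() fills the URB with URB_FREE_BUFFER set; under the USB core's semantics, dropping the last URB reference then frees urb->transfer_buffer as well. A sketch of the hazard the reordering avoids:

/* What the old single 'fail_free' label could do once the flag was set:
 *
 *	kfree(buf);			// first free
 *	usb_free_urb(urb);		// urb teardown sees URB_FREE_BUFFER
 *					// and kfree()s buf a second time
 *
 * Clearing urb->transfer_flags before usb_free_urb() on the path where
 * buf has already been freed breaks that chain; fail_free_urb is used
 * on the path where buf was never attached to the URB.
 */
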
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 466da01ba2e3..2cb833b3006a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,6 +312,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct netdev_queue *queue = NULL;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -329,6 +330,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
+ queue = netdev_get_tx_queue(dev, rxq);
/* The napi pointer is available when an XDP program is
* attached or when GRO is enabled
@@ -340,6 +342,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+ if (queue)
+ txq_trans_cond_update(queue);
if (!use_napi)
dev_lstats_add(dev, length);
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index db05b5e930be..356cf8dd4164 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2768,7 +2768,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
static void virtnet_freeze_down(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int i;
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
@@ -2776,14 +2775,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
-
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
- }
+ if (netif_running(vi->dev))
+ virtnet_close(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
@@ -2791,7 +2784,7 @@ static int init_vqs(struct virtnet_info *vi);
static int virtnet_restore_up(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int err, i;
+ int err;
err = init_vqs(vi);
if (err)
@@ -2800,15 +2793,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
if (netif_running(vi->dev)) {
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ err = virtnet_open(vi->dev);
+ if (err)
+ return err;
}
netif_tx_lock_bh(vi->dev);
@@ -3655,14 +3642,20 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
- err = register_netdev(dev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+ err = register_netdevice(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
+ rtnl_unlock();
goto free_failover;
}
virtio_device_ready(vdev);
+ rtnl_unlock();
+
err = virtnet_cpu_notif_add(vi);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 5704defd7be1..237cbd5c5060 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1782,9 +1782,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
}
/* Header Length = MAC header len + IP header len + TCP header len*/
- hdrlen = ETH_HLEN +
- (int)skb_network_header_len(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
switch (gso_type) {
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fc61a4418737..a256695fc89e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1201,9 +1201,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
}
mss = skb_shinfo(skb)->gso_size;
- hdrlen = skb_transport_header(skb) -
- skb_mac_header(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
skb_shinfo(skb)->gso_segs =
DIV_ROUND_UP(skb->len - hdrlen, mss);
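
Both conversions above use the then-new helper; a sketch of what it expands to, matching the open-coded MAC + IP + TCP header sums it replaces (the _sketch suffix marks this as an illustration, not the header's definition):

/* Equivalent of the expressions replaced above: total header length up
 * to and including the TCP header, via the transport offset.
 */
static inline int skb_tcp_all_headers_sketch(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
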
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8c0b9546d5a2..2409007f1fd9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
#define XENNET_TIMEOUT (5 * HZ)
static const struct ethtool_ops xennet_ethtool_ops;
@@ -173,6 +177,9 @@ struct netfront_info {
/* Is device behaving sane? */
bool broken;
+ /* Should skbs be bounced into a zeroed buffer? */
+ bool bounce;
+
atomic_t rx_gso_checksum_fixup;
};
@@ -271,7 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
if (unlikely(!skb))
return NULL;
- page = page_pool_dev_alloc_pages(queue->page_pool);
+ page = page_pool_alloc_pages(queue->page_pool,
+ GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
if (unlikely(!page)) {
kfree_skb(skb);
return NULL;
@@ -665,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
return nxmit;
}
+static struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+ unsigned int headerlen = skb_headroom(skb);
+ /* Align size to allocate full pages and avoid contiguous data leaks */
+ unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+ XEN_PAGE_SIZE);
+ struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+ if (!n)
+ return NULL;
+
+ if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+ WARN_ONCE(1, "misaligned skb allocated\n");
+ kfree_skb(n);
+ return NULL;
+ }
+
+ /* Set the data pointer */
+ skb_reserve(n, headerlen);
+ /* Set the tail pointer and length */
+ skb_put(n, skb->len);
+
+ BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+ skb_copy_header(n, skb);
+ return n;
+}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
@@ -718,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* The first req should be at least ETH_HLEN size or the packet will be
* dropped by netback.
+ *
+ * If the backend is not trusted, bounce all data to zeroed pages to
+ * avoid exposing contiguous data on the granted page not belonging to
+ * the skb.
*/
- if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
- nskb = skb_copy(skb, GFP_ATOMIC);
+ if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = bounce_skb(skb);
if (!nskb)
goto drop;
dev_consume_skb_any(skb);
@@ -1053,8 +1092,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
}
}
rcu_read_unlock();
-next:
+
__skb_queue_tail(list, skb);
+
+next:
if (!(rx->flags & XEN_NETRXF_more_data))
break;
@@ -2214,6 +2255,10 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
+ /* Check if backend is trusted. */
+ info->bounce = !xennet_trusted ||
+ !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
/* Check if backend supports multiple queues */
max_queues = xenbus_read_unsigned(info->xbdev->otherend,
"multi-queue-max-queues", 1);
@@ -2381,6 +2426,9 @@ static int xennet_connect(struct net_device *dev)
return err;
if (np->netback_has_xdp_headroom)
pr_info("backend supports XDP headroom\n");
+ if (np->bounce)
+ dev_info(&np->xbdev->dev,
+ "bouncing transmitted data to zeroed pages\n");
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;